/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
#include "gimple.h"
#include "dwarf2.h"
#include "df.h"
#include "tm-constrs.h"
#include "params.h"
#include "cselib.h"
#include "debug.h"
#include "dwarf2out.h"
#include "sched-int.h"
#include "sbitmap.h"
#include "fibheap.h"
/* State of the upper 128 bits of an AVX register.  */

enum upper_128bits_state
{
  unknown = 0,
  unused,
  used
};

typedef struct block_info_def
{
  /* State of the upper 128bits of AVX registers at exit.  */
  enum upper_128bits_state state;
  /* TRUE if state of the upper 128bits of AVX registers is unchanged
     in this block.  */
  bool unchanged;
  /* TRUE if block has been processed.  */
  bool processed;
  /* TRUE if block has been scanned.  */
  bool scanned;
  /* Previous state of the upper 128bits of AVX registers at entry.  */
  enum upper_128bits_state prev;
} *block_info;

#define BLOCK_INFO(B) ((block_info) (B)->aux)
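/* The aux fields holding this per-block info are allocated in bulk by
   alloc_aux_for_blocks at the start of move_or_delete_vzeroupper below
   and released again by free_aux_for_blocks once the pass is done.  */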
enum call_avx256_state
{
  /* Callee returns 256bit AVX register.  */
  callee_return_avx256 = -1,
  /* Callee returns and passes 256bit AVX register.  */
  callee_return_pass_avx256,
  /* Callee passes 256bit AVX register.  */
  callee_pass_avx256,
  /* Callee doesn't return nor pass 256bit AVX register, or no
     256bit AVX register in function return.  */
  call_no_avx256,
  /* vzeroupper intrinsic.  */
  vzeroupper_intrinsic
};
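/* One of these values travels as the operand of each vzeroupper
   UNSPEC_VOLATILE pattern; it is read back below with
   INTVAL (XVECEXP (pat, 0, 0)) when the insn is processed.  */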
/* Check if a 256bit AVX register is referenced in stores.  */

static void
check_avx256_stores (rtx dest, const_rtx set, void *data)
{
  if ((REG_P (dest)
       && VALID_AVX256_REG_MODE (GET_MODE (dest)))
      || (GET_CODE (set) == SET
	  && REG_P (SET_SRC (set))
	  && VALID_AVX256_REG_MODE (GET_MODE (SET_SRC (set)))))
    {
      enum upper_128bits_state *state
	= (enum upper_128bits_state *) data;
      *state = used;
    }
}
/* Helper function for move_or_delete_vzeroupper_1.  Look for vzeroupper
   in basic block BB.  Delete it if upper 128bit AVX registers are
   unused.  If it isn't deleted, move it to just before a jump insn.

   STATE is state of the upper 128bits of AVX registers at entry.  */

static void
move_or_delete_vzeroupper_2 (basic_block bb,
			     enum upper_128bits_state state)
{
  rtx insn, bb_end;
  rtx vzeroupper_insn = NULL_RTX;
  rtx pat;
  int avx256;
  bool unchanged;
  if (BLOCK_INFO (bb)->unchanged)
    {
      if (dump_file)
	fprintf (dump_file, " [bb %i] unchanged: upper 128bits: %d\n",
		 bb->index, state);

      BLOCK_INFO (bb)->state = state;
      return;
    }

  if (BLOCK_INFO (bb)->scanned && BLOCK_INFO (bb)->prev == state)
    {
      if (dump_file)
	fprintf (dump_file, " [bb %i] scanned: upper 128bits: %d\n",
		 bb->index, BLOCK_INFO (bb)->state);
      return;
    }

  BLOCK_INFO (bb)->prev = state;

  if (dump_file)
    fprintf (dump_file, " [bb %i] entry: upper 128bits: %d\n",
	     bb->index, state);

  unchanged = true;
  /* BB_END changes when it is deleted.  */
  bb_end = BB_END (bb);
  insn = BB_HEAD (bb);
  while (insn != bb_end)
    {
      insn = NEXT_INSN (insn);

      if (!NONDEBUG_INSN_P (insn))
	continue;

      /* Move vzeroupper before jump/call.  */
      if (JUMP_P (insn) || CALL_P (insn))
	{
	  if (!vzeroupper_insn)
	    continue;

	  if (PREV_INSN (insn) != vzeroupper_insn)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Move vzeroupper after:\n");
		  print_rtl_single (dump_file, PREV_INSN (insn));
		  fprintf (dump_file, "before:\n");
		  print_rtl_single (dump_file, insn);
		}
	      reorder_insns_nobb (vzeroupper_insn, vzeroupper_insn,
				  PREV_INSN (insn));
	    }
	  vzeroupper_insn = NULL_RTX;
	  continue;
	}
      pat = PATTERN (insn);

      /* Check insn for vzeroupper intrinsic.  */
      if (GET_CODE (pat) == UNSPEC_VOLATILE
	  && XINT (pat, 1) == UNSPECV_VZEROUPPER)
	{
	  if (dump_file)
	    {
	      /* Found vzeroupper intrinsic.  */
	      fprintf (dump_file, "Found vzeroupper:\n");
	      print_rtl_single (dump_file, insn);
	    }
	}
      else
	{
	  /* Check insn for vzeroall intrinsic.  */
	  if (GET_CODE (pat) == PARALLEL
	      && GET_CODE (XVECEXP (pat, 0, 0)) == UNSPEC_VOLATILE
	      && XINT (XVECEXP (pat, 0, 0), 1) == UNSPECV_VZEROALL)
	    {
	      state = unused;
	      unchanged = false;

	      /* Delete pending vzeroupper insertion.  */
	      if (vzeroupper_insn)
		{
		  delete_insn (vzeroupper_insn);
		  vzeroupper_insn = NULL_RTX;
		}
	    }
	  else if (state != used)
	    {
	      note_stores (pat, check_avx256_stores, &state);
	      if (state == used)
		unchanged = false;
	    }
	  continue;
	}
      /* Process vzeroupper intrinsic.  */
      avx256 = INTVAL (XVECEXP (pat, 0, 0));

      if (state == unused)
	{
	  /* Since the upper 128bits are cleared, callee must not pass
	     256bit AVX register.  We only need to check if callee
	     returns 256bit AVX register.  */
	  if (avx256 == callee_return_avx256)
	    {
	      state = used;
	      unchanged = false;
	    }

	  /* Remove unnecessary vzeroupper since upper 128bits are
	     cleared.  */
	  if (dump_file)
	    {
	      fprintf (dump_file, "Delete redundant vzeroupper:\n");
	      print_rtl_single (dump_file, insn);
	    }
	  delete_insn (insn);
	}
      else
	{
	  /* Set state to UNUSED if callee doesn't return 256bit AVX
	     register.  */
	  if (avx256 != callee_return_pass_avx256)
	    state = unused;

	  if (avx256 == callee_return_pass_avx256
	      || avx256 == callee_pass_avx256)
	    {
	      /* Must remove vzeroupper since callee passes in 256bit
		 AVX register.  */
	      if (dump_file)
		{
		  fprintf (dump_file, "Delete callee pass vzeroupper:\n");
		  print_rtl_single (dump_file, insn);
		}
	      delete_insn (insn);
	    }
	  else
	    {
	      vzeroupper_insn = insn;
	      unchanged = false;
	    }
	}
    }
  BLOCK_INFO (bb)->state = state;
  BLOCK_INFO (bb)->unchanged = unchanged;
  BLOCK_INFO (bb)->scanned = true;

  if (dump_file)
    fprintf (dump_file, " [bb %i] exit: %s: upper 128bits: %d\n",
	     bb->index, unchanged ? "unchanged" : "changed",
	     state);
}
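/* To summarize the transitions above: a store into a 256bit AVX
   register flips the tracked state to "used"; vzeroall, and any
   vzeroupper that is kept, move it back towards "unused"; and a
   vzeroupper reached while the state is already "unused" is deleted
   as redundant.  */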
/* Helper function for move_or_delete_vzeroupper.  Process vzeroupper
   in BLOCK and check its predecessor blocks.  Treat UNKNOWN state
   as USED if UNKNOWN_IS_UNUSED is true.  Return TRUE if the exit
   state is changed.  */

static bool
move_or_delete_vzeroupper_1 (basic_block block, bool unknown_is_unused)
{
  edge e;
  edge_iterator ei;
  enum upper_128bits_state state, old_state, new_state;
  bool seen_unknown;

  if (dump_file)
    fprintf (dump_file, " Process [bb %i]: status: %d\n",
	     block->index, BLOCK_INFO (block)->processed);

  if (BLOCK_INFO (block)->processed)
    return false;

  state = unused;
  /* Check all predecessor edges of this block.  */
  seen_unknown = false;
  FOR_EACH_EDGE (e, ei, block->preds)
    {
      if (e->src == block)
	continue;
      switch (BLOCK_INFO (e->src)->state)
	{
	case unknown:
	  if (!unknown_is_unused)
	    seen_unknown = true;
	  break;
	case unused:
	  break;
	case used:
	  state = used;
	  break;
	}
    }

  if (seen_unknown)
    state = unknown;
  old_state = BLOCK_INFO (block)->state;
  move_or_delete_vzeroupper_2 (block, state);
  new_state = BLOCK_INFO (block)->state;

  if (state != unknown || new_state == used)
    BLOCK_INFO (block)->processed = true;

  /* Need to rescan if the upper 128bits of AVX registers are changed
     to USED at exit.  */
  if (new_state != old_state)
    {
      if (new_state == used)
	cfun->machine->rescan_vzeroupper_p = 1;
      return true;
    }
  else
    return false;
}
/* Go through the instruction stream looking for vzeroupper.  Delete
   it if upper 128bit AVX registers are unused.  If it isn't deleted,
   move it to just before a jump insn.  */

static void
move_or_delete_vzeroupper (void)
{
  edge e;
  edge_iterator ei;
  basic_block bb;
  fibheap_t worklist, pending, fibheap_swap;
  sbitmap visited, in_worklist, in_pending, sbitmap_swap;
  int *bb_order;
  int *rc_order;
  int i;

  /* Set up block info for each basic block.  */
  alloc_aux_for_blocks (sizeof (struct block_info_def));

  /* Process outgoing edges of entry point.  */
  if (dump_file)
    fprintf (dump_file, "Process outgoing edges of entry point\n");

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    {
      move_or_delete_vzeroupper_2 (e->dest,
				   cfun->machine->caller_pass_avx256_p
				   ? used : unused);
      BLOCK_INFO (e->dest)->processed = true;
    }
  /* Compute reverse completion order of depth first search of the CFG
     so that the data-flow runs faster.  */
  rc_order = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
  bb_order = XNEWVEC (int, last_basic_block);
  pre_and_rev_post_order_compute (NULL, rc_order, false);
  for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
    bb_order[rc_order[i]] = i;
  free (rc_order);

  worklist = fibheap_new ();
  pending = fibheap_new ();
  visited = sbitmap_alloc (last_basic_block);
  in_worklist = sbitmap_alloc (last_basic_block);
  in_pending = sbitmap_alloc (last_basic_block);
  sbitmap_zero (in_worklist);

  /* Don't check outgoing edges of entry point.  */
  sbitmap_ones (in_pending);
  FOR_EACH_BB (bb)
    if (BLOCK_INFO (bb)->processed)
      RESET_BIT (in_pending, bb->index);
    else
      {
	move_or_delete_vzeroupper_1 (bb, false);
	fibheap_insert (pending, bb_order[bb->index], bb);
      }

  if (dump_file)
    fprintf (dump_file, "Check remaining basic blocks\n");
  while (!fibheap_empty (pending))
    {
      fibheap_swap = pending;
      pending = worklist;
      worklist = fibheap_swap;
      sbitmap_swap = in_pending;
      in_pending = in_worklist;
      in_worklist = sbitmap_swap;

      sbitmap_zero (visited);

      cfun->machine->rescan_vzeroupper_p = 0;

      while (!fibheap_empty (worklist))
	{
	  bb = (basic_block) fibheap_extract_min (worklist);
	  RESET_BIT (in_worklist, bb->index);
	  gcc_assert (!TEST_BIT (visited, bb->index));
	  if (!TEST_BIT (visited, bb->index))
	    {
	      edge_iterator ei;

	      SET_BIT (visited, bb->index);

	      if (move_or_delete_vzeroupper_1 (bb, false))
		FOR_EACH_EDGE (e, ei, bb->succs)
		  {
		    if (e->dest == EXIT_BLOCK_PTR
			|| BLOCK_INFO (e->dest)->processed)
		      continue;

		    if (TEST_BIT (visited, e->dest->index))
		      {
			if (!TEST_BIT (in_pending, e->dest->index))
			  {
			    /* Send E->DEST to next round.  */
			    SET_BIT (in_pending, e->dest->index);
			    fibheap_insert (pending,
					    bb_order[e->dest->index],
					    e->dest);
			  }
		      }
		    else if (!TEST_BIT (in_worklist, e->dest->index))
		      {
			/* Add E->DEST to current round.  */
			SET_BIT (in_worklist, e->dest->index);
			fibheap_insert (worklist, bb_order[e->dest->index],
					e->dest);
		      }
		  }
	    }
	}

      if (!cfun->machine->rescan_vzeroupper_p)
	break;
    }
  free (bb_order);
  fibheap_delete (worklist);
  fibheap_delete (pending);
  sbitmap_free (visited);
  sbitmap_free (in_worklist);
  sbitmap_free (in_pending);

  if (dump_file)
    fprintf (dump_file, "Process remaining basic blocks\n");

  FOR_EACH_BB (bb)
    move_or_delete_vzeroupper_1 (bb, true);

  free_aux_for_blocks ();
}
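/* Note on the structure above: successors of the entry block are
   seeded from caller_pass_avx256_p, the two fibonacci heaps then
   iterate the remaining blocks in reverse completion order until no
   exit state flips to "used" anymore, and the final FOR_EACH_BB sweep
   reruns the analysis treating any still-unknown predecessor state as
   "unused".  */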
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)				\
  ((mode) == QImode ? 0					\
   : (mode) == HImode ? 1				\
   : (mode) == SImode ? 2				\
   : (mode) == DImode ? 3				\
   : 4)
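/* For instance, the cost of starting an SImode multiply on the current
   target is looked up as something like
   ix86_cost->mult_init[MODE_INDEX (SImode)]; the trailing index 4
   ("other") catches every mode wider than DImode.  (Field name per the
   processor_costs layout; shown here only as an illustration.)  */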
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
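/* Illustrative sketch only (an assumed helper, not GCC's decide_alg):
   how a {max, alg} table such as DUMMY_STRINGOP_ALGS or the
   per-processor entries below is consulted.  Each entry applies to
   block sizes up to MAX bytes, with -1 standing for "no upper
   bound".  */
#if 0
static enum stringop_alg
example_pick_alg (const struct stringop_algs *algs,
		  unsigned HOST_WIDE_INT size)
{
  unsigned int i;

  for (i = 0; i < MAX_STRINGOP_ALGS; i++)
    if (algs->size[i].max == -1
	|| size <= (unsigned HOST_WIDE_INT) algs->size[i].max)
      return algs->size[i].alg;

  /* Sizes not known at compile time fall back to this field.  */
  return algs->unknown_size;
}
#endif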
const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),			/* cost of an add instruction */
  COSTS_N_BYTES (3),			/* cost of a lea instruction */
  COSTS_N_BYTES (2),			/* variable shift costs */
  COSTS_N_BYTES (3),			/* constant shift costs */
  {COSTS_N_BYTES (3),			/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),			/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  COSTS_N_BYTES (3),			/* cost of movsx */
  COSTS_N_BYTES (3),			/* cost of movzx */
  0,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {2, 2, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 2},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {2, 2, 2},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  3,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {3, 3},				/* cost of storing MMX registers
					   in SImode and DImode */
  3,					/* cost of moving SSE register */
  {3, 3, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {3, 3, 3},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache  */
  0,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_BYTES (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),			/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),			/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),			/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  1,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  1,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (6),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),			/* HI */
   COSTS_N_INSNS (6),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  COSTS_N_INSNS (1),			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (23),			/* SI */
   COSTS_N_INSNS (23),			/* DI */
   COSTS_N_INSNS (23)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache  */
  0,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (23),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (12),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),			/* HI */
   COSTS_N_INSNS (12),			/* SI */
   COSTS_N_INSNS (12),			/* DI */
   COSTS_N_INSNS (12)},			/* other */
  1,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),			/* HI */
   COSTS_N_INSNS (40),			/* SI */
   COSTS_N_INSNS (40),			/* DI */
   COSTS_N_INSNS (40)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  4,					/* size of l1 cache.  486 has 8kB cache
					   shared for code and data, so 4kB is
					   not really precise.  */
  4,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),			/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (11),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),			/* HI */
   COSTS_N_INSNS (11),			/* SI */
   COSTS_N_INSNS (11),			/* DI */
   COSTS_N_INSNS (11)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),			/* HI */
   COSTS_N_INSNS (25),			/* SI */
   COSTS_N_INSNS (25),			/* DI */
   COSTS_N_INSNS (25)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  8,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  8,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (4)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),			/* HI */
   COSTS_N_INSNS (17),			/* SI */
   COSTS_N_INSNS (17),			/* DI */
   COSTS_N_INSNS (17)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache  */
  32,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks inline loop is still a
     noticeable win, for bigger blocks either rep movsl or rep movsb is
     the way to go.  Rep movsb has apparently more expensive startup time in
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (2),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (7),			/* SI */
   COSTS_N_INSNS (7),			/* DI */
   COSTS_N_INSNS (7)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (39),			/* SI */
   COSTS_N_INSNS (39),			/* DI */
   COSTS_N_INSNS (39)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  1,					/* cost for loading QImode using movzbl */
  {1, 1, 1},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {1, 1, 1},				/* cost of storing integer registers */
  1,					/* cost of reg,reg fld/fst */
  {1, 1, 1},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 6, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */

  1,					/* cost of moving MMX register */
  {1, 1},				/* cost of loading MMX registers
					   in SImode and DImode */
  {1, 1},				/* cost of storing MMX registers
					   in SImode and DImode */
  1,					/* cost of moving SSE register */
  {1, 1, 1},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {1, 1, 1},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  1,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  128,					/* size of l2 cache.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (3),			/* DI */
   COSTS_N_INSNS (3)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),			/* HI */
   COSTS_N_INSNS (18),			/* SI */
   COSTS_N_INSNS (18),			/* DI */
   COSTS_N_INSNS (18)},			/* other */
  COSTS_N_INSNS (2),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  3,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  6,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  32,					/* size of l2 cache.  Some models
					   have integrated l2 cache, but
					   optimizing for k6 is not important
					   enough to worry about that.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  COSTS_N_INSNS (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (5),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),			/* HI */
   COSTS_N_INSNS (5),			/* SI */
   COSTS_N_INSNS (5),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  5,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 3, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporary accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  5,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  3,					/* vec_unalign_load_cost.  */
  3,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  2,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporary accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs bdver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {5, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {5, 5, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 4},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
  					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  16,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),			/* cost of FSQRT instruction.  */

  /* BDVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporary accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  6,					/* scalar_stmt_cost.  */
  4,					/* scalar load_cost.  */
  4,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  4,					/* vec_align_load_cost.  */
  4,					/* vec_unalign_load_cost.  */
  4,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs btver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  32,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* BTVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporary accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (3),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (4),			/* constant shift costs */
  {COSTS_N_INSNS (15),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),			/* HI */
   COSTS_N_INSNS (15),			/* SI */
   COSTS_N_INSNS (15),			/* DI */
   COSTS_N_INSNS (15)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),			/* HI */
   COSTS_N_INSNS (56),			/* SI */
   COSTS_N_INSNS (56),			/* DI */
   COSTS_N_INSNS (56)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  2,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  12,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  10,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (5),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
   {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (10),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),			/* HI */
   COSTS_N_INSNS (10),			/* SI */
   COSTS_N_INSNS (10),			/* DI */
   COSTS_N_INSNS (10)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),			/* HI */
   COSTS_N_INSNS (66),			/* SI */
   COSTS_N_INSNS (66),			/* DI */
   COSTS_N_INSNS (66)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  3,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  6,					/* cost of moving MMX register */
  {12, 12},				/* cost of loading MMX registers
					   in SImode and DImode */
  {12, 12},				/* cost of storing MMX registers
					   in SImode and DImode */
  6,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {12, 12, 12},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  8,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  1024,					/* size of l2 cache.  */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
   {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (2)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
1639 /* Generic64 should produce code tuned for Nocona and K8. */
1641 struct processor_costs generic64_cost = {
1642 COSTS_N_INSNS (1), /* cost of an add instruction */
1643 /* On all chips taken into consideration lea is 2 cycles and more. With
1644 this cost however our current implementation of synth_mult results in
1645 use of unnecessary temporary registers causing regression on several
1646 SPECfp benchmarks. */
1647 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1648 COSTS_N_INSNS (1), /* variable shift costs */
1649 COSTS_N_INSNS (1), /* constant shift costs */
1650 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1651 COSTS_N_INSNS (4), /* HI */
1652 COSTS_N_INSNS (3), /* SI */
1653 COSTS_N_INSNS (4), /* DI */
1654 COSTS_N_INSNS (2)}, /* other */
1655 0, /* cost of multiply per each bit set */
1656 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1657 COSTS_N_INSNS (26), /* HI */
1658 COSTS_N_INSNS (42), /* SI */
1659 COSTS_N_INSNS (74), /* DI */
1660 COSTS_N_INSNS (74)}, /* other */
1661 COSTS_N_INSNS (1), /* cost of movsx */
1662 COSTS_N_INSNS (1), /* cost of movzx */
1663 8, /* "large" insn */
1664 17, /* MOVE_RATIO */
1665 4, /* cost for loading QImode using movzbl */
1666 {4, 4, 4}, /* cost of loading integer registers
1667 in QImode, HImode and SImode.
1668 Relative to reg-reg move (2). */
1669 {4, 4, 4}, /* cost of storing integer registers */
1670 4, /* cost of reg,reg fld/fst */
1671 {12, 12, 12}, /* cost of loading fp registers
1672 in SFmode, DFmode and XFmode */
1673 {6, 6, 8}, /* cost of storing fp registers
1674 in SFmode, DFmode and XFmode */
1675 2, /* cost of moving MMX register */
1676 {8, 8}, /* cost of loading MMX registers
1677 in SImode and DImode */
1678 {8, 8}, /* cost of storing MMX registers
1679 in SImode and DImode */
1680 2, /* cost of moving SSE register */
1681 {8, 8, 8}, /* cost of loading SSE registers
1682 in SImode, DImode and TImode */
1683 {8, 8, 8}, /* cost of storing SSE registers
1684 in SImode, DImode and TImode */
1685 5, /* MMX or SSE register to integer */
1686 32, /* size of l1 cache. */
1687 512, /* size of l2 cache. */
1688 64, /* size of prefetch block */
1689 6, /* number of parallel prefetches */
1690 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
1691 value is increased to the perhaps more appropriate value of 5. */
1692 3, /* Branch cost */
1693 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1694 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1695 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1696 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1697 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1698 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1699 {DUMMY_STRINGOP_ALGS,
1700 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1701 {DUMMY_STRINGOP_ALGS,
1702 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1703 1, /* scalar_stmt_cost. */
1704 1, /* scalar load_cost. */
1705 1, /* scalar_store_cost. */
1706 1, /* vec_stmt_cost. */
1707 1, /* vec_to_scalar_cost. */
1708 1, /* scalar_to_vec_cost. */
1709 1, /* vec_align_load_cost. */
1710 2, /* vec_unalign_load_cost. */
1711 1, /* vec_store_cost. */
1712 3, /* cond_taken_branch_cost. */
1713 1, /* cond_not_taken_branch_cost. */
1716 /* Generic32 should produce code tuned for PPro, Pentium4, Nocona,
Core 2, and K8. */
static const
1719 struct processor_costs generic32_cost = {
1720 COSTS_N_INSNS (1), /* cost of an add instruction */
1721 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1722 COSTS_N_INSNS (1), /* variable shift costs */
1723 COSTS_N_INSNS (1), /* constant shift costs */
1724 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1725 COSTS_N_INSNS (4), /* HI */
1726 COSTS_N_INSNS (3), /* SI */
1727 COSTS_N_INSNS (4), /* DI */
1728 COSTS_N_INSNS (2)}, /* other */
1729 0, /* cost of multiply per each bit set */
1730 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1731 COSTS_N_INSNS (26), /* HI */
1732 COSTS_N_INSNS (42), /* SI */
1733 COSTS_N_INSNS (74), /* DI */
1734 COSTS_N_INSNS (74)}, /* other */
1735 COSTS_N_INSNS (1), /* cost of movsx */
1736 COSTS_N_INSNS (1), /* cost of movzx */
1737 8, /* "large" insn */
1738 17, /* MOVE_RATIO */
1739 4, /* cost for loading QImode using movzbl */
1740 {4, 4, 4}, /* cost of loading integer registers
1741 in QImode, HImode and SImode.
1742 Relative to reg-reg move (2). */
1743 {4, 4, 4}, /* cost of storing integer registers */
1744 4, /* cost of reg,reg fld/fst */
1745 {12, 12, 12}, /* cost of loading fp registers
1746 in SFmode, DFmode and XFmode */
1747 {6, 6, 8}, /* cost of storing fp registers
1748 in SFmode, DFmode and XFmode */
1749 2, /* cost of moving MMX register */
1750 {8, 8}, /* cost of loading MMX registers
1751 in SImode and DImode */
1752 {8, 8}, /* cost of storing MMX registers
1753 in SImode and DImode */
1754 2, /* cost of moving SSE register */
1755 {8, 8, 8}, /* cost of loading SSE registers
1756 in SImode, DImode and TImode */
1757 {8, 8, 8}, /* cost of storing SSE registers
1758 in SImode, DImode and TImode */
1759 5, /* MMX or SSE register to integer */
1760 32, /* size of l1 cache. */
1761 256, /* size of l2 cache. */
1762 64, /* size of prefetch block */
1763 6, /* number of parallel prefetches */
1764 3, /* Branch cost */
1765 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1766 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1767 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1768 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1769 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1770 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1771 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1772 DUMMY_STRINGOP_ALGS},
1773 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1774 DUMMY_STRINGOP_ALGS},
1775 1, /* scalar_stmt_cost. */
1776 1, /* scalar load_cost. */
1777 1, /* scalar_store_cost. */
1778 1, /* vec_stmt_cost. */
1779 1, /* vec_to_scalar_cost. */
1780 1, /* scalar_to_vec_cost. */
1781 1, /* vec_align_load_cost. */
1782 2, /* vec_unalign_load_cost. */
1783 1, /* vec_store_cost. */
1784 3, /* cond_taken_branch_cost. */
1785 1, /* cond_not_taken_branch_cost. */
1788 const struct processor_costs *ix86_cost = &pentium_cost;
1790 /* Processor feature/optimization bitmasks. */
1791 #define m_386 (1<<PROCESSOR_I386)
1792 #define m_486 (1<<PROCESSOR_I486)
1793 #define m_PENT (1<<PROCESSOR_PENTIUM)
1794 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1795 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1796 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1797 #define m_CORE2_32 (1<<PROCESSOR_CORE2_32)
1798 #define m_CORE2_64 (1<<PROCESSOR_CORE2_64)
1799 #define m_COREI7_32 (1<<PROCESSOR_COREI7_32)
1800 #define m_COREI7_64 (1<<PROCESSOR_COREI7_64)
1801 #define m_COREI7 (m_COREI7_32 | m_COREI7_64)
1802 #define m_CORE2I7_32 (m_CORE2_32 | m_COREI7_32)
1803 #define m_CORE2I7_64 (m_CORE2_64 | m_COREI7_64)
1804 #define m_CORE2I7 (m_CORE2I7_32 | m_CORE2I7_64)
1805 #define m_ATOM (1<<PROCESSOR_ATOM)
1807 #define m_GEODE (1<<PROCESSOR_GEODE)
1808 #define m_K6 (1<<PROCESSOR_K6)
1809 #define m_K6_GEODE (m_K6 | m_GEODE)
1810 #define m_K8 (1<<PROCESSOR_K8)
1811 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1812 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1813 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1814 #define m_BDVER1 (1<<PROCESSOR_BDVER1)
1815 #define m_BTVER1 (1<<PROCESSOR_BTVER1)
1816 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10 | m_BDVER1 | m_BTVER1)
1818 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1819 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1821 /* Generic instruction choice should be the common subset of supported CPUs
1822 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1823 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
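/* Illustrative note (a sketch, not part of the port proper): membership
   of the current tuning target in one of the masks above is just a bit
   test against 1 << PROCESSOR_xxx, e.g.

     unsigned int ix86_tune_mask = 1u << ix86_tune;
     bool tuned_p = ((m_CORE2I7 | m_GENERIC) & ix86_tune_mask) != 0;

   The initial_ix86_tune_features table below is consumed the same way.  */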
1825 /* Feature tests against the various tunings. */
1826 unsigned char ix86_tune_features[X86_TUNE_LAST];
1828 /* Feature tests against the various tunings used to create ix86_tune_features
1829 based on the processor mask. */
1830 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1831 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1832 negatively, so enabling it for Generic64 seems like a good code-size
1833 tradeoff. We can't enable it for 32-bit generic because it does not
1834 work well with PPro-based chips. */
1835 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2I7_64 | m_GENERIC64,
1837 /* X86_TUNE_PUSH_MEMORY */
1838 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1839 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1841 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1844 /* X86_TUNE_UNROLL_STRLEN */
1845 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1846 | m_CORE2I7 | m_GENERIC,
1848 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1849 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1850 | m_CORE2I7 | m_GENERIC,
1852 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1853 on simulation results, but after P4 was made no performance benefit
1854 was observed with branch hints; they also increase code size.
1855 As a result, icc never generates branch hints. */
1858 /* X86_TUNE_DOUBLE_WITH_ADD */
1861 /* X86_TUNE_USE_SAHF */
1862 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER1 | m_BTVER1
1863 | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1865 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1866 partial dependencies. */
1867 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1868 | m_CORE2I7 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1870 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1871 register stalls on the Generic32 compilation setting as well. However,
1872 in the current implementation the partial register stalls are not eliminated
1873 very well - they can be introduced via subregs synthesized by combine
1874 and can happen in caller/callee-saving sequences. Because this option
1875 pays back little on PPro-based chips and conflicts with the partial-register
1876 dependencies used by Athlon/P4-based chips, it is better to leave it off
1877 for generic32 for now. */
1880 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1881 m_CORE2I7 | m_GENERIC,
1883 /* X86_TUNE_USE_HIMODE_FIOP */
1884 m_386 | m_486 | m_K6_GEODE,
1886 /* X86_TUNE_USE_SIMODE_FIOP */
1887 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2I7 | m_GENERIC),
1889 /* X86_TUNE_USE_MOV0 */
1892 /* X86_TUNE_USE_CLTD */
1893 ~(m_PENT | m_ATOM | m_K6 | m_CORE2I7 | m_GENERIC),
1895 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1898 /* X86_TUNE_SPLIT_LONG_MOVES */
1901 /* X86_TUNE_READ_MODIFY_WRITE */
1904 /* X86_TUNE_READ_MODIFY */
1907 /* X86_TUNE_PROMOTE_QIMODE */
1908 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1909 | m_CORE2I7 | m_GENERIC /* | m_PENT4 ? */,
1911 /* X86_TUNE_FAST_PREFIX */
1912 ~(m_PENT | m_486 | m_386),
1914 /* X86_TUNE_SINGLE_STRINGOP */
1915 m_386 | m_PENT4 | m_NOCONA,
1917 /* X86_TUNE_QIMODE_MATH */
1920 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1921 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL, this option
1922 might be considered for Generic32 if our scheme for avoiding partial
1923 stalls were more effective. */
1926 /* X86_TUNE_PROMOTE_QI_REGS */
1929 /* X86_TUNE_PROMOTE_HI_REGS */
1932 /* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
1933 over esp addition. */
1934 m_386 | m_486 | m_PENT | m_PPRO,
1936 /* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
1937 over esp addition. */
1940 /* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
1941 over esp subtraction. */
1942 m_386 | m_486 | m_PENT | m_K6_GEODE,
1944 /* X86_TUNE_DOUBLE_PUSH. Enable if double push insn is preferred
1945 over esp subtraction. */
1946 m_PENT | m_K6_GEODE,
1948 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1949 for DFmode copies */
1950 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7
1951 | m_GENERIC | m_GEODE),
1953 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1954 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1956 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1957 conflict here between PPro/Pentium4-based chips that treat 128bit
1958 SSE registers as single units and K8-based chips that divide SSE
1959 registers into two 64bit halves. This knob promotes all store destinations
1960 to be 128bit to allow register renaming on 128bit SSE units, but usually
1961 results in one extra microop on 64bit SSE units. Experimental results
1962 show that disabling this option on P4 brings over a 20% SPECfp regression,
1963 while enabling it on K8 brings roughly a 2.4% regression that can be partly
1964 masked by careful scheduling of moves. */
1965 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7 | m_GENERIC
1966 | m_AMDFAM10 | m_BDVER1,
1968 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
1969 m_AMDFAM10 | m_BDVER1 | m_BTVER1 | m_COREI7,
1971 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
1972 m_BDVER1 | m_COREI7,
1974 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
1977 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1978 are resolved on SSE register parts instead of whole registers, so we may
1979 maintain just the lower part of scalar values in the proper format, leaving
1980 the upper part undefined. */
1983 /* X86_TUNE_SSE_TYPELESS_STORES */
1986 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1987 m_PPRO | m_PENT4 | m_NOCONA,
1989 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1990 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC,
1992 /* X86_TUNE_PROLOGUE_USING_MOVE */
1993 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2I7 | m_GENERIC,
1995 /* X86_TUNE_EPILOGUE_USING_MOVE */
1996 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2I7 | m_GENERIC,
1998 /* X86_TUNE_SHIFT1 */
2001 /* X86_TUNE_USE_FFREEP */
2004 /* X86_TUNE_INTER_UNIT_MOVES */
2005 ~(m_AMD_MULTIPLE | m_GENERIC),
2007 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
2008 ~(m_AMDFAM10 | m_BDVER1),
2010 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
2011 than 4 branch instructions in the 16 byte window. */
2012 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2I7
2015 /* X86_TUNE_SCHEDULE */
2016 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2I7
2019 /* X86_TUNE_USE_BT */
2020 m_AMD_MULTIPLE | m_ATOM | m_CORE2I7 | m_GENERIC,
2022 /* X86_TUNE_USE_INCDEC */
2023 ~(m_PENT4 | m_NOCONA | m_CORE2I7 | m_GENERIC | m_ATOM),
2025 /* X86_TUNE_PAD_RETURNS */
2026 m_AMD_MULTIPLE | m_CORE2I7 | m_GENERIC,
2028 /* X86_TUNE_PAD_SHORT_FUNCTION: Pad short functions. */
2031 /* X86_TUNE_EXT_80387_CONSTANTS */
2032 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
2033 | m_CORE2I7 | m_GENERIC,
2035 /* X86_TUNE_SHORTEN_X87_SSE */
2038 /* X86_TUNE_AVOID_VECTOR_DECODE */
2039 m_K8 | m_CORE2I7_64 | m_GENERIC64,
2041 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
2042 and SImode multiplies, but the 386 and 486 do HImode multiplies faster. */
2045 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
2046 vector path on AMD machines. */
2047 m_K8 | m_CORE2I7_64 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1 | m_BTVER1,
2049 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
machines. */
2051 m_K8 | m_CORE2I7_64 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1 | m_BTVER1,
2053 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
than via MOV. */
2057 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
2058 but one byte longer. */
2061 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
2062 operand that cannot be represented using a modRM byte. The XOR
2063 replacement is long decoded, so this split helps here as well. */
2066 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
from FP to FP. */
2068 m_AMDFAM10 | m_CORE2I7 | m_GENERIC,
2070 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
2071 from integer to FP. */
2074 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
2075 with a subsequent conditional jump instruction into a single
2076 compare-and-branch uop. */
2079 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
2080 will impact LEA instruction selection. */
2083 /* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector
instructions. */
2088 /* Feature tests against the various architecture variations. */
2089 unsigned char ix86_arch_features[X86_ARCH_LAST];
2091 /* Feature tests against the various architecture variations, used to create
2092 ix86_arch_features based on the processor mask. */
2093 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
2094 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
2095 ~(m_386 | m_486 | m_PENT | m_K6),
2097 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
2100 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
2103 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
2106 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
2110 static const unsigned int x86_accumulate_outgoing_args
2111 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2I7
2114 static const unsigned int x86_arch_always_fancy_math_387
2115 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
2116 | m_NOCONA | m_CORE2I7 | m_GENERIC;
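/* A sketch of how the mask tables above become the boolean feature
   arrays; this is roughly what ix86_option_override_internal (later in
   this file) does:

     ix86_tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i]
	 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

   and likewise ix86_arch_features is derived from
   initial_ix86_arch_features using 1u << ix86_arch.  */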
2118 static enum stringop_alg stringop_alg = no_stringop;
2120 /* In case the average insn count for a single function invocation is
2121 lower than this constant, emit fast (but longer) prologue and
epilogue code. */
2123 #define FAST_PROLOGUE_INSN_COUNT 20
2125 /* Names for 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
2126 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
2127 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
2128 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
2130 /* Array of the smallest class containing reg number REGNO, indexed by
2131 REGNO. Used by REGNO_REG_CLASS in i386.h. */
2133 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
2135 /* ax, dx, cx, bx */
2136 AREG, DREG, CREG, BREG,
2137 /* si, di, bp, sp */
2138 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
2140 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
2141 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
2144 /* flags, fpsr, fpcr, frame */
2145 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
2147 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2150 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
2153 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2154 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2155 /* SSE REX registers */
2156 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2160 /* The "default" register map used in 32bit mode. */
2162 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
2164 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
2165 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
2166 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2167 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
2168 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
2169 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2170 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2173 /* The "default" register map used in 64bit mode. */
2175 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
2177 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
2178 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
2179 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2180 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
2181 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
2182 8,9,10,11,12,13,14,15, /* extended integer registers */
2183 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
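/* Illustrative note: these maps are consumed via DBX_REGISTER_NUMBER in
   i386.h, conceptually

     #define DBX_REGISTER_NUMBER(n) \
       (TARGET_64BIT ? dbx64_register_map[n] : dbx_register_map[n])

   so e.g. gcc regno 1 (%edx) is emitted as debug regno 2 in 32-bit mode
   but as regno 1 in 64-bit mode.  */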
2186 /* Define the register numbers to be used in Dwarf debugging information.
2187 The SVR4 reference port C compiler uses the following register numbers
2188 in its Dwarf output code:
2189 0 for %eax (gcc regno = 0)
2190 1 for %ecx (gcc regno = 2)
2191 2 for %edx (gcc regno = 1)
2192 3 for %ebx (gcc regno = 3)
2193 4 for %esp (gcc regno = 7)
2194 5 for %ebp (gcc regno = 6)
2195 6 for %esi (gcc regno = 4)
2196 7 for %edi (gcc regno = 5)
2197 The following three DWARF register numbers are never generated by
2198 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
2199 believes these numbers have these meanings.
2200 8 for %eip (no gcc equivalent)
2201 9 for %eflags (gcc regno = 17)
2202 10 for %trapno (no gcc equivalent)
2203 It is not at all clear how we should number the FP stack registers
2204 for the x86 architecture. If the version of SDB on x86/svr4 were
2205 a bit less brain dead with respect to floating-point then we would
2206 have a precedent to follow with respect to DWARF register numbers
2207 for x86 FP registers, but the SDB on x86/svr4 is so completely
2208 broken with respect to FP registers that it is hardly worth thinking
2209 of it as something to strive for compatibility with.
2210 The version of x86/svr4 SDB I have at the moment does (partially)
2211 seem to believe that DWARF register number 11 is associated with
2212 the x86 register %st(0), but that's about all. Higher DWARF
2213 register numbers don't seem to be associated with anything in
2214 particular, and even for DWARF regno 11, SDB only seems to under-
2215 stand that it should say that a variable lives in %st(0) (when
2216 asked via an `=' command) if we said it was in DWARF regno 11,
2217 but SDB still prints garbage when asked for the value of the
2218 variable in question (via a `/' command).
2219 (Also note that the labels SDB prints for various FP stack regs
2220 when doing an `x' command are all wrong.)
2221 Note that these problems generally don't affect the native SVR4
2222 C compiler because it doesn't allow the use of -O with -g and
2223 because when it is *not* optimizing, it allocates a memory
2224 location for each floating-point variable, and the memory
2225 location is what gets described in the DWARF AT_location
2226 attribute for the variable in question.
2227 Regardless of the severe mental illness of the x86/svr4 SDB, we
2228 do something sensible here and we use the following DWARF
2229 register numbers. Note that these are all stack-top-relative
2231 11 for %st(0) (gcc regno = 8)
2232 12 for %st(1) (gcc regno = 9)
2233 13 for %st(2) (gcc regno = 10)
2234 14 for %st(3) (gcc regno = 11)
2235 15 for %st(4) (gcc regno = 12)
2236 16 for %st(5) (gcc regno = 13)
2237 17 for %st(6) (gcc regno = 14)
2238 18 for %st(7) (gcc regno = 15)
2240 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
2242 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
2243 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
2244 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2245 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
2246 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
2247 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2248 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2251 /* Define parameter passing and return registers. */
2253 static int const x86_64_int_parameter_registers[6] =
2255 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
2258 static int const x86_64_ms_abi_int_parameter_registers[4] =
2260 CX_REG, DX_REG, R8_REG, R9_REG
2263 static int const x86_64_int_return_registers[4] =
2265 AX_REG, DX_REG, DI_REG, SI_REG
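/* Illustrative sketch: the argument classifier indexes the parameter
   tables above by the number of integer registers already assigned, so
   under the SysV ABI the third integer argument lands in

     x86_64_int_parameter_registers[2]  ==  DX_REG  (%rdx)

   while the MS ABI draws from its shorter four-entry table.  */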
2268 /* Define the structure for the machine field in struct function. */
2270 struct GTY(()) stack_local_entry {
2271 unsigned short mode;
2274 struct stack_local_entry *next;
2277 /* Structure describing stack frame layout.
2278 Stack grows downward:
2284 saved static chain if ix86_static_chain_on_stack
2286 saved frame pointer if frame_pointer_needed
2287 <- HARD_FRAME_POINTER
2293 <- sse_regs_save_offset
2296 [va_arg registers] |
2300 [padding2] | = to_allocate
2309 int outgoing_arguments_size;
2310 HOST_WIDE_INT frame;
2312 /* The offsets relative to ARG_POINTER. */
2313 HOST_WIDE_INT frame_pointer_offset;
2314 HOST_WIDE_INT hard_frame_pointer_offset;
2315 HOST_WIDE_INT stack_pointer_offset;
2316 HOST_WIDE_INT hfp_save_offset;
2317 HOST_WIDE_INT reg_save_offset;
2318 HOST_WIDE_INT sse_reg_save_offset;
2320 /* When save_regs_using_mov is set, emit prologue using
2321 move instead of push instructions. */
2322 bool save_regs_using_mov;
2325 /* Code model option. */
2326 enum cmodel ix86_cmodel;
2328 enum asm_dialect ix86_asm_dialect = ASM_ATT;
2330 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
2332 /* Which unit we are generating floating point math for. */
2333 enum fpmath_unit ix86_fpmath;
2335 /* Which cpu are we scheduling for. */
2336 enum attr_cpu ix86_schedule;
2338 /* Which cpu are we optimizing for. */
2339 enum processor_type ix86_tune;
2341 /* Which instruction set architecture to use. */
2342 enum processor_type ix86_arch;
2344 /* True if the SSE prefetch instruction is not a NOP. */
2345 int x86_prefetch_sse;
2347 /* ix86_regparm_string as a number */
2348 static int ix86_regparm;
2350 /* -mstackrealign option */
2351 static const char ix86_force_align_arg_pointer_string[]
2352 = "force_align_arg_pointer";
2354 static rtx (*ix86_gen_leave) (void);
2355 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
2356 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
2357 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
2358 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
2359 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
2360 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
2361 static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
2362 static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
2363 static rtx (*ix86_gen_probe_stack_range) (rtx, rtx, rtx);
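/* A sketch of how these generator pointers are bound (done once in
   ix86_option_override_internal), so the rest of the backend need not
   re-test TARGET_64BIT at every emission site:

     if (TARGET_64BIT)
       ix86_gen_leave = gen_leave_rex64;
     else
       ix86_gen_leave = gen_leave;

   and similarly for the other pointers.  */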
2365 /* Preferred alignment for stack boundary in bits. */
2366 unsigned int ix86_preferred_stack_boundary;
2368 /* Alignment for incoming stack boundary in bits specified at
command line. */
2370 static unsigned int ix86_user_incoming_stack_boundary;
2372 /* Default alignment for incoming stack boundary in bits. */
2373 static unsigned int ix86_default_incoming_stack_boundary;
2375 /* Alignment for incoming stack boundary in bits. */
2376 unsigned int ix86_incoming_stack_boundary;
2378 /* The ABI used by the target. */
2379 enum calling_abi ix86_abi;
2381 /* Values 1-5: see jump.c */
2382 int ix86_branch_cost;
2384 /* Calling abi specific va_list type nodes. */
2385 static GTY(()) tree sysv_va_list_type_node;
2386 static GTY(()) tree ms_va_list_type_node;
2388 /* Variables which are this size or smaller are put in the data/bss
2389 or ldata/lbss sections. */
2391 int ix86_section_threshold = 65536;
2393 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
2394 char internal_label_prefix[16];
2395 int internal_label_prefix_len;
2397 /* Fence to use after loop using movnt. */
2400 /* Register class used for passing a given 64bit part of the argument.
2401 These represent classes as documented by the psABI, with the exception
2402 of the SSESF and SSEDF classes, which are basically the SSE class: gcc will
2403 just use an SF- or DFmode move instead of DImode to avoid reformatting penalties.
2405 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
2406 whenever possible (the upper half does contain padding). */
2407 enum x86_64_reg_class
2410 X86_64_INTEGER_CLASS,
2411 X86_64_INTEGERSI_CLASS,
2418 X86_64_COMPLEX_X87_CLASS,
2422 #define MAX_CLASSES 4
2424 /* Table of constants used by fldpi, fldln2, etc.... */
2425 static REAL_VALUE_TYPE ext_80387_constants_table [5];
2426 static bool ext_80387_constants_init = 0;
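/* Illustrative note: the constants table is filled lazily; callers are
   expected to test the flag first, conceptually

     if (!ext_80387_constants_init)
       init_ext_80387_constants ();

   before indexing ext_80387_constants_table.  */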
2429 static struct machine_function * ix86_init_machine_status (void);
2430 static rtx ix86_function_value (const_tree, const_tree, bool);
2431 static bool ix86_function_value_regno_p (const unsigned int);
2432 static unsigned int ix86_function_arg_boundary (enum machine_mode,
2434 static rtx ix86_static_chain (const_tree, bool);
2435 static int ix86_function_regparm (const_tree, const_tree);
2436 static void ix86_compute_frame_layout (struct ix86_frame *);
2437 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
2439 static void ix86_add_new_builtins (int);
2440 static rtx ix86_expand_vec_perm_builtin (tree);
2441 static tree ix86_canonical_va_list_type (tree);
2442 static void predict_jump (int);
2443 static unsigned int split_stack_prologue_scratch_regno (void);
2444 static bool i386_asm_output_addr_const_extra (FILE *, rtx);
2446 enum ix86_function_specific_strings
2448 IX86_FUNCTION_SPECIFIC_ARCH,
2449 IX86_FUNCTION_SPECIFIC_TUNE,
2450 IX86_FUNCTION_SPECIFIC_FPMATH,
2451 IX86_FUNCTION_SPECIFIC_MAX
2454 static char *ix86_target_string (int, int, const char *, const char *,
2455 const char *, bool);
2456 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
2457 static void ix86_function_specific_save (struct cl_target_option *);
2458 static void ix86_function_specific_restore (struct cl_target_option *);
2459 static void ix86_function_specific_print (FILE *, int,
2460 struct cl_target_option *);
2461 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2462 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
2463 static bool ix86_can_inline_p (tree, tree);
2464 static void ix86_set_current_function (tree);
2465 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2467 static enum calling_abi ix86_function_abi (const_tree);
2470 #ifndef SUBTARGET32_DEFAULT_CPU
2471 #define SUBTARGET32_DEFAULT_CPU "i386"
2474 /* The svr4 ABI for the i386 says that records and unions are returned
in memory. */
2476 #ifndef DEFAULT_PCC_STRUCT_RETURN
2477 #define DEFAULT_PCC_STRUCT_RETURN 1
2480 /* Whether -mtune= or -march= were specified */
2481 static int ix86_tune_defaulted;
2482 static int ix86_arch_specified;
2484 /* Define a set of ISAs which are available when a given ISA is
2485 enabled. MMX and SSE ISAs are handled separately. */
2487 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
2488 #define OPTION_MASK_ISA_3DNOW_SET \
2489 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
2491 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
2492 #define OPTION_MASK_ISA_SSE2_SET \
2493 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
2494 #define OPTION_MASK_ISA_SSE3_SET \
2495 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
2496 #define OPTION_MASK_ISA_SSSE3_SET \
2497 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
2498 #define OPTION_MASK_ISA_SSE4_1_SET \
2499 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
2500 #define OPTION_MASK_ISA_SSE4_2_SET \
2501 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
2502 #define OPTION_MASK_ISA_AVX_SET \
2503 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
2504 #define OPTION_MASK_ISA_FMA_SET \
2505 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
2507 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
as -msse4.2. */
2509 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
2511 #define OPTION_MASK_ISA_SSE4A_SET \
2512 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
2513 #define OPTION_MASK_ISA_FMA4_SET \
2514 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
2515 | OPTION_MASK_ISA_AVX_SET)
2516 #define OPTION_MASK_ISA_XOP_SET \
2517 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
2518 #define OPTION_MASK_ISA_LWP_SET \
2521 /* AES and PCLMUL need SSE2 because they use xmm registers */
2522 #define OPTION_MASK_ISA_AES_SET \
2523 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
2524 #define OPTION_MASK_ISA_PCLMUL_SET \
2525 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
2527 #define OPTION_MASK_ISA_ABM_SET \
2528 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
2530 #define OPTION_MASK_ISA_BMI_SET OPTION_MASK_ISA_BMI
2531 #define OPTION_MASK_ISA_TBM_SET OPTION_MASK_ISA_TBM
2532 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
2533 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
2534 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
2535 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
2536 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
2538 #define OPTION_MASK_ISA_FSGSBASE_SET OPTION_MASK_ISA_FSGSBASE
2539 #define OPTION_MASK_ISA_RDRND_SET OPTION_MASK_ISA_RDRND
2540 #define OPTION_MASK_ISA_F16C_SET \
2541 (OPTION_MASK_ISA_F16C | OPTION_MASK_ISA_AVX_SET)
2543 /* Define a set of ISAs which aren't available when a given ISA is
2544 disabled. MMX and SSE ISAs are handled separately. */
2546 #define OPTION_MASK_ISA_MMX_UNSET \
2547 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
2548 #define OPTION_MASK_ISA_3DNOW_UNSET \
2549 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
2550 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
2552 #define OPTION_MASK_ISA_SSE_UNSET \
2553 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2554 #define OPTION_MASK_ISA_SSE2_UNSET \
2555 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2556 #define OPTION_MASK_ISA_SSE3_UNSET \
2557 (OPTION_MASK_ISA_SSE3 \
2558 | OPTION_MASK_ISA_SSSE3_UNSET \
2559 | OPTION_MASK_ISA_SSE4A_UNSET )
2560 #define OPTION_MASK_ISA_SSSE3_UNSET \
2561 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2562 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2563 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2564 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2565 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2566 #define OPTION_MASK_ISA_AVX_UNSET \
2567 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2568 | OPTION_MASK_ISA_FMA4_UNSET | OPTION_MASK_ISA_F16C_UNSET)
2569 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2571 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
as -mno-sse4.1. */
2573 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2575 #define OPTION_MASK_ISA_SSE4A_UNSET \
2576 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2578 #define OPTION_MASK_ISA_FMA4_UNSET \
2579 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2580 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2581 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2583 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2584 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2585 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2586 #define OPTION_MASK_ISA_BMI_UNSET OPTION_MASK_ISA_BMI
2587 #define OPTION_MASK_ISA_TBM_UNSET OPTION_MASK_ISA_TBM
2588 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2589 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2590 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2591 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2592 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
2594 #define OPTION_MASK_ISA_FSGSBASE_UNSET OPTION_MASK_ISA_FSGSBASE
2595 #define OPTION_MASK_ISA_RDRND_UNSET OPTION_MASK_ISA_RDRND
2596 #define OPTION_MASK_ISA_F16C_UNSET OPTION_MASK_ISA_F16C
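/* Illustration of the intended transitivity (a sketch, no new logic):
   enabling an ISA also enables its prerequisites, and disabling one also
   disables everything layered on top of it:

     isa_flags |= OPTION_MASK_ISA_AVX_SET;      enables AVX plus SSE..SSE4.2
     isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;  clears SSE2, SSE3, ..., AVX

   ix86_handle_option below applies exactly this |= SET / &= ~UNSET
   pattern for each -m<isa> / -mno-<isa> option.  */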
2598 /* Vectorization library interface and handlers. */
2599 static tree (*ix86_veclib_handler) (enum built_in_function, tree, tree);
2601 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2602 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2604 /* Processor target table, indexed by processor number. */
struct ptt
{
2607 const struct processor_costs *cost; /* Processor costs */
2608 const int align_loop; /* Default alignments. */
2609 const int align_loop_max_skip;
2610 const int align_jump;
2611 const int align_jump_max_skip;
2612 const int align_func;
2615 static const struct ptt processor_target_table[PROCESSOR_max] =
2617 {&i386_cost, 4, 3, 4, 3, 4},
2618 {&i486_cost, 16, 15, 16, 15, 16},
2619 {&pentium_cost, 16, 7, 16, 7, 16},
2620 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2621 {&geode_cost, 0, 0, 0, 0, 0},
2622 {&k6_cost, 32, 7, 32, 7, 32},
2623 {&athlon_cost, 16, 7, 16, 7, 16},
2624 {&pentium4_cost, 0, 0, 0, 0, 0},
2625 {&k8_cost, 16, 7, 16, 7, 16},
2626 {&nocona_cost, 0, 0, 0, 0, 0},
2627 /* Core 2 32-bit. */
2628 {&generic32_cost, 16, 10, 16, 10, 16},
2629 /* Core 2 64-bit. */
2630 {&generic64_cost, 16, 10, 16, 10, 16},
2631 /* Core i7 32-bit. */
2632 {&generic32_cost, 16, 10, 16, 10, 16},
2633 /* Core i7 64-bit. */
2634 {&generic64_cost, 16, 10, 16, 10, 16},
2635 {&generic32_cost, 16, 7, 16, 7, 16},
2636 {&generic64_cost, 16, 10, 16, 10, 16},
2637 {&amdfam10_cost, 32, 24, 32, 7, 32},
2638 {&bdver1_cost, 32, 24, 32, 7, 32},
2639 {&btver1_cost, 32, 24, 32, 7, 32},
2640 {&atom_cost, 16, 7, 16, 7, 16}
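/* Sketch of the standard use of this table: the per-processor alignments
   seed the generic align_* variables when the user did not set them,
   roughly as done in ix86_option_override_internal:

     if (align_loops == 0)
       {
	 align_loops = processor_target_table[ix86_tune].align_loop;
	 align_loops_max_skip
	   = processor_target_table[ix86_tune].align_loop_max_skip;
       }

   and similarly for align_jumps and align_functions.  */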
2643 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2672 /* Return true if a red-zone is in use. */
2675 ix86_using_red_zone (void)
2677 return TARGET_RED_ZONE && !TARGET_64BIT_MS_ABI;
2680 /* Implement TARGET_HANDLE_OPTION. */
2683 ix86_handle_option (struct gcc_options *opts,
2684 struct gcc_options *opts_set ATTRIBUTE_UNUSED,
2685 const struct cl_decoded_option *decoded,
2686 location_t loc ATTRIBUTE_UNUSED)
2688 size_t code = decoded->opt_index;
2689 int value = decoded->value;
2696 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2697 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2701 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2702 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2709 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2710 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2714 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2715 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2725 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2726 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2730 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2731 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2738 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2739 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2743 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2744 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2751 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2752 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2756 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2757 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2764 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2765 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2769 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2770 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2777 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2778 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2782 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2783 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2790 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2791 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2795 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2796 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2803 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2804 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2808 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2809 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2816 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2817 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2821 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2822 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2827 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2828 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2832 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2833 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2839 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2840 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2844 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2845 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2852 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2853 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2857 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2858 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2865 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2866 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2870 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2871 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2878 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2879 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2883 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2884 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2891 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2892 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2896 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2897 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2904 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_BMI_SET;
2905 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_BMI_SET;
2909 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_BMI_UNSET;
2910 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_BMI_UNSET;
2917 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_TBM_SET;
2918 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_TBM_SET;
2922 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_TBM_UNSET;
2923 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_TBM_UNSET;
2930 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2931 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2935 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2936 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2943 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2944 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2948 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2949 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2956 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2957 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2961 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2962 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2969 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2970 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2974 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2975 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2982 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2983 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2987 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2988 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2995 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2996 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
3000 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
3001 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
3008 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
3009 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
3013 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
3014 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
3021 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE_SET;
3022 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_SET;
3026 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_FSGSBASE_UNSET;
3027 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_FSGSBASE_UNSET;
3034 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_RDRND_SET;
3035 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_SET;
3039 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_RDRND_UNSET;
3040 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_RDRND_UNSET;
3047 opts->x_ix86_isa_flags |= OPTION_MASK_ISA_F16C_SET;
3048 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_SET;
3052 opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_F16C_UNSET;
3053 opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_F16C_UNSET;
3062 /* Return a string that documents the current -m options. The caller is
3063 responsible for freeing the string. */
3066 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
3067 const char *fpmath, bool add_nl_p)
3069 struct ix86_target_opts
3071 const char *option; /* option string */
3072 int mask; /* isa mask options */
3075 /* This table is ordered so that options like -msse4.2 that imply
3076 preceding options will match those first. */
3077 static struct ix86_target_opts isa_opts[] =
3079 { "-m64", OPTION_MASK_ISA_64BIT },
3080 { "-mfma4", OPTION_MASK_ISA_FMA4 },
3081 { "-mfma", OPTION_MASK_ISA_FMA },
3082 { "-mxop", OPTION_MASK_ISA_XOP },
3083 { "-mlwp", OPTION_MASK_ISA_LWP },
3084 { "-msse4a", OPTION_MASK_ISA_SSE4A },
3085 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
3086 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
3087 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
3088 { "-msse3", OPTION_MASK_ISA_SSE3 },
3089 { "-msse2", OPTION_MASK_ISA_SSE2 },
3090 { "-msse", OPTION_MASK_ISA_SSE },
3091 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
3092 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
3093 { "-mmmx", OPTION_MASK_ISA_MMX },
3094 { "-mabm", OPTION_MASK_ISA_ABM },
3095 { "-mbmi", OPTION_MASK_ISA_BMI },
3096 { "-mtbm", OPTION_MASK_ISA_TBM },
3097 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
3098 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
3099 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
3100 { "-maes", OPTION_MASK_ISA_AES },
3101 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
3102 { "-mfsgsbase", OPTION_MASK_ISA_FSGSBASE },
3103 { "-mrdrnd", OPTION_MASK_ISA_RDRND },
3104 { "-mf16c", OPTION_MASK_ISA_F16C },
3108 static struct ix86_target_opts flag_opts[] =
3110 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
3111 { "-m80387", MASK_80387 },
3112 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
3113 { "-malign-double", MASK_ALIGN_DOUBLE },
3114 { "-mcld", MASK_CLD },
3115 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
3116 { "-mieee-fp", MASK_IEEE_FP },
3117 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
3118 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
3119 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
3120 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
3121 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
3122 { "-mno-push-args", MASK_NO_PUSH_ARGS },
3123 { "-mno-red-zone", MASK_NO_RED_ZONE },
3124 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
3125 { "-mrecip", MASK_RECIP },
3126 { "-mrtd", MASK_RTD },
3127 { "-msseregparm", MASK_SSEREGPARM },
3128 { "-mstack-arg-probe", MASK_STACK_PROBE },
3129 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
3130 { "-mvect8-ret-in-mem", MASK_VECT8_RETURNS },
3131 { "-m8bit-idiv", MASK_USE_8BIT_IDIV },
3132 { "-mvzeroupper", MASK_VZEROUPPER },
3133 { "-mavx256-split-unaligned-load", MASK_AVX256_SPLIT_UNALIGNED_LOAD},
3134 { "-mavx256-split-unaligned-store", MASK_AVX256_SPLIT_UNALIGNED_STORE},
3137 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
3140 char target_other[40];
3149 memset (opts, '\0', sizeof (opts));
3151 /* Add -march= option. */
3154 opts[num][0] = "-march=";
3155 opts[num++][1] = arch;
3158 /* Add -mtune= option. */
3161 opts[num][0] = "-mtune=";
3162 opts[num++][1] = tune;
3165 /* Pick out the options in isa options. */
3166 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
3168 if ((isa & isa_opts[i].mask) != 0)
3170 opts[num++][0] = isa_opts[i].option;
3171 isa &= ~ isa_opts[i].mask;
3175 if (isa && add_nl_p)
3177 opts[num++][0] = isa_other;
3178 sprintf (isa_other, "(other isa: %#x)", isa);
3181 /* Add flag options. */
3182 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
3184 if ((flags & flag_opts[i].mask) != 0)
3186 opts[num++][0] = flag_opts[i].option;
3187 flags &= ~ flag_opts[i].mask;
3191 if (flags && add_nl_p)
3193 opts[num++][0] = target_other;
3194 sprintf (target_other, "(other flags: %#x)", flags);
3197 /* Add -fpmath= option. */
3200 opts[num][0] = "-mfpmath=";
3201 opts[num++][1] = fpmath;
3208 gcc_assert (num < ARRAY_SIZE (opts));
3210 /* Size the string. */
3212 sep_len = (add_nl_p) ? 3 : 1;
3213 for (i = 0; i < num; i++)
3216 for (j = 0; j < 2; j++)
3218 len += strlen (opts[i][j]);
3221 /* Build the string. */
3222 ret = ptr = (char *) xmalloc (len);
3225 for (i = 0; i < num; i++)
3229 for (j = 0; j < 2; j++)
3230 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
3237 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
3245 for (j = 0; j < 2; j++)
3248 memcpy (ptr, opts[i][j], len2[j]);
3250 line_len += len2[j];
3255 gcc_assert (ret + len >= ptr);
3260 /* Return TRUE if software prefetching is beneficial for the
target. */
3264 software_prefetching_beneficial_p (void)
3268 case PROCESSOR_GEODE:
3270 case PROCESSOR_ATHLON:
3272 case PROCESSOR_AMDFAM10:
3273 case PROCESSOR_BTVER1:
3281 /* Return true if profiling code should be emitted before
3282 the prologue; otherwise return false. On x86 this is the case
3283 exactly when -mfentry ("hotfix"-style profiling) is in use. */
3285 ix86_profile_before_prologue (void)
3287 return flag_fentry != 0;
3290 /* Function that is callable from the debugger to print the current
options. */
3293 ix86_debug_options (void)
3295 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
3296 ix86_arch_string, ix86_tune_string,
3297 ix86_fpmath_string, true);
3301 fprintf (stderr, "%s\n\n", opts);
3305 fputs ("<no options>\n\n", stderr);
3310 /* Override various settings based on options. If MAIN_ARGS_P, the
3311 options are from the command line, otherwise they are from
attribute(target). */
3315 ix86_option_override_internal (bool main_args_p)
3318 unsigned int ix86_arch_mask, ix86_tune_mask;
3319 const bool ix86_tune_specified = (ix86_tune_string != NULL);
3324 /* Comes from final.c -- no real reason to change it. */
3325 #define MAX_CODE_ALIGN 16
3333 PTA_PREFETCH_SSE = 1 << 4,
3335 PTA_3DNOW_A = 1 << 6,
3339 PTA_POPCNT = 1 << 10,
3341 PTA_SSE4A = 1 << 12,
3342 PTA_NO_SAHF = 1 << 13,
3343 PTA_SSE4_1 = 1 << 14,
3344 PTA_SSE4_2 = 1 << 15,
3346 PTA_PCLMUL = 1 << 17,
3349 PTA_MOVBE = 1 << 20,
3353 PTA_FSGSBASE = 1 << 24,
3354 PTA_RDRND = 1 << 25,
3358 /* if this reaches 32, need to widen struct pta flags below */
static struct pta
{
3363 const char *const name; /* processor name or nickname. */
3364 const enum processor_type processor;
3365 const enum attr_cpu schedule;
3366 const unsigned /*enum pta_flags*/ flags;
3368 const processor_alias_table[] =
3370 {"i386", PROCESSOR_I386, CPU_NONE, 0},
3371 {"i486", PROCESSOR_I486, CPU_NONE, 0},
3372 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
3373 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
3374 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
3375 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
3376 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
3377 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
3378 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
3379 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
3380 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
3381 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
3382 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3384 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3386 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
3387 PTA_MMX | PTA_SSE | PTA_SSE2},
3388 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
3389 PTA_MMX |PTA_SSE | PTA_SSE2},
3390 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
3391 PTA_MMX | PTA_SSE | PTA_SSE2},
3392 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
3393 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
3394 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
3395 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3396 | PTA_CX16 | PTA_NO_SAHF},
3397 {"core2", PROCESSOR_CORE2_64, CPU_CORE2,
3398 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3399 | PTA_SSSE3 | PTA_CX16},
3400 {"corei7", PROCESSOR_COREI7_64, CPU_COREI7,
3401 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3402 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16},
3403 {"corei7-avx", PROCESSOR_COREI7_64, CPU_COREI7,
3404 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3405 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX
3406 | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL},
3407 {"atom", PROCESSOR_ATOM, CPU_ATOM,
3408 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3409 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
3410 {"geode", PROCESSOR_GEODE, CPU_GEODE,
3411 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
3412 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
3413 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
3414 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
3415 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
3416 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3417 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
3418 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3419 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
3420 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3421 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
3422 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3423 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
3424 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3425 {"x86-64", PROCESSOR_K8, CPU_K8,
3426 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
3427 {"k8", PROCESSOR_K8, CPU_K8,
3428 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3429 | PTA_SSE2 | PTA_NO_SAHF},
3430 {"k8-sse3", PROCESSOR_K8, CPU_K8,
3431 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3432 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3433 {"opteron", PROCESSOR_K8, CPU_K8,
3434 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3435 | PTA_SSE2 | PTA_NO_SAHF},
3436 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
3437 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3438 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3439 {"athlon64", PROCESSOR_K8, CPU_K8,
3440 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3441 | PTA_SSE2 | PTA_NO_SAHF},
3442 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
3443 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3444 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3445 {"athlon-fx", PROCESSOR_K8, CPU_K8,
3446 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3447 | PTA_SSE2 | PTA_NO_SAHF},
3448 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3449 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3450 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3451 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3452 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3453 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3454 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
3455 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3456 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
3457 | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_FMA4
3458 | PTA_XOP | PTA_LWP},
3459 {"btver1", PROCESSOR_BTVER1, CPU_GENERIC64,
3460 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3461 | PTA_SSSE3 | PTA_SSE4A |PTA_ABM | PTA_CX16},
3462 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
3463 0 /* flags are only used for -march switch. */ },
3464 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
3465 PTA_64BIT /* flags are only used for -march switch. */ },
3468 int const pta_size = ARRAY_SIZE (processor_alias_table);
3470 /* Set up prefix/suffix so the error messages refer to either the command
3471 line argument, or the attribute(target). */
3480 prefix = "option(\"";
3485 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3486 SUBTARGET_OVERRIDE_OPTIONS;
3489 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3490 SUBSUBTARGET_OVERRIDE_OPTIONS;
3493 /* -fPIC is the default for x86_64. */
3494 if (TARGET_MACHO && TARGET_64BIT)
3497 /* Need to check -mtune=generic first. */
3498 if (ix86_tune_string)
3500 if (!strcmp (ix86_tune_string, "generic")
3501 || !strcmp (ix86_tune_string, "i686")
3502 /* As special support for cross compilers we read -mtune=native
3503 as -mtune=generic. With native compilers we won't see the
3504 -mtune=native, as it was changed by the driver. */
3505 || !strcmp (ix86_tune_string, "native"))
3508 ix86_tune_string = "generic64";
3510 ix86_tune_string = "generic32";
3512 /* If this call is for setting the option attribute, allow the
3513 generic32/generic64 that was previously set. */
3514 else if (!main_args_p
3515 && (!strcmp (ix86_tune_string, "generic32")
3516 || !strcmp (ix86_tune_string, "generic64")))
3518 else if (!strncmp (ix86_tune_string, "generic", 7))
3519 error ("bad value (%s) for %stune=%s %s",
3520 ix86_tune_string, prefix, suffix, sw);
3521 else if (!strcmp (ix86_tune_string, "x86-64"))
3522 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated; use "
3523 "%stune=k8%s or %stune=generic%s instead as appropriate",
3524 prefix, suffix, prefix, suffix, prefix, suffix);
3528 if (ix86_arch_string)
3529 ix86_tune_string = ix86_arch_string;
3530 if (!ix86_tune_string)
3532 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
3533 ix86_tune_defaulted = 1;
3536 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
3537 need to use a sensible tune option. */
3538 if (!strcmp (ix86_tune_string, "generic")
3539 || !strcmp (ix86_tune_string, "x86-64")
3540 || !strcmp (ix86_tune_string, "i686"))
3543 ix86_tune_string = "generic64";
3545 ix86_tune_string = "generic32";
3549 if (ix86_stringop_string)
3551 if (!strcmp (ix86_stringop_string, "rep_byte"))
3552 stringop_alg = rep_prefix_1_byte;
3553 else if (!strcmp (ix86_stringop_string, "libcall"))
3554 stringop_alg = libcall;
3555 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
3556 stringop_alg = rep_prefix_4_byte;
3557 else if (!strcmp (ix86_stringop_string, "rep_8byte")
3559 /* rep; movq isn't available in 32-bit code. */
3560 stringop_alg = rep_prefix_8_byte;
3561 else if (!strcmp (ix86_stringop_string, "byte_loop"))
3562 stringop_alg = loop_1_byte;
3563 else if (!strcmp (ix86_stringop_string, "loop"))
3564 stringop_alg = loop;
3565 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
3566 stringop_alg = unrolled_loop;
3568 error ("bad value (%s) for %sstringop-strategy=%s %s",
3569 ix86_stringop_string, prefix, suffix, sw);
3572 if (!ix86_arch_string)
3573 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
3575 ix86_arch_specified = 1;
3577 /* Validate -mabi= value. */
3578 if (ix86_abi_string)
3580 if (strcmp (ix86_abi_string, "sysv") == 0)
3581 ix86_abi = SYSV_ABI;
3582 else if (strcmp (ix86_abi_string, "ms") == 0)
3585 error ("unknown ABI (%s) for %sabi=%s %s",
3586 ix86_abi_string, prefix, suffix, sw);
3589 ix86_abi = DEFAULT_ABI;
3591 if (ix86_cmodel_string != 0)
3593 if (!strcmp (ix86_cmodel_string, "small"))
3594 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3595 else if (!strcmp (ix86_cmodel_string, "medium"))
3596 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
3597 else if (!strcmp (ix86_cmodel_string, "large"))
3598 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
3600 error ("code model %s does not support PIC mode", ix86_cmodel_string);
3601 else if (!strcmp (ix86_cmodel_string, "32"))
3602 ix86_cmodel = CM_32;
3603 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
3604 ix86_cmodel = CM_KERNEL;
3606 error ("bad value (%s) for %scmodel=%s %s",
3607 ix86_cmodel_string, prefix, suffix, sw);
3611 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3612 use of rip-relative addressing. This eliminates fixups that
3613 would otherwise be needed if this object is to be placed in a
3614 DLL, and is essentially just as efficient as direct addressing. */
3615 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3616 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3617 else if (TARGET_64BIT)
3618 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3620 ix86_cmodel = CM_32;
3622 if (ix86_asm_string != 0)
3624 if (! TARGET_MACHO
3625 && !strcmp (ix86_asm_string, "intel"))
3626 ix86_asm_dialect = ASM_INTEL;
3627 else if (!strcmp (ix86_asm_string, "att"))
3628 ix86_asm_dialect = ASM_ATT;
3630 error ("bad value (%s) for %sasm=%s %s",
3631 ix86_asm_string, prefix, suffix, sw);
3633 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
3634 error ("code model %qs not supported in the %s bit mode",
3635 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
3636 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3637 sorry ("%i-bit mode not compiled in",
3638 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3640 for (i = 0; i < pta_size; i++)
3641 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3643 ix86_schedule = processor_alias_table[i].schedule;
3644 ix86_arch = processor_alias_table[i].processor;
3645 /* Default cpu tuning to the architecture. */
3646 ix86_tune = ix86_arch;
3648 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3649 error ("CPU you selected does not support x86-64 "
3652 if (processor_alias_table[i].flags & PTA_MMX
3653 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3654 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3655 if (processor_alias_table[i].flags & PTA_3DNOW
3656 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3657 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3658 if (processor_alias_table[i].flags & PTA_3DNOW_A
3659 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3660 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3661 if (processor_alias_table[i].flags & PTA_SSE
3662 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3663 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3664 if (processor_alias_table[i].flags & PTA_SSE2
3665 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3666 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3667 if (processor_alias_table[i].flags & PTA_SSE3
3668 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3669 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3670 if (processor_alias_table[i].flags & PTA_SSSE3
3671 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3672 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3673 if (processor_alias_table[i].flags & PTA_SSE4_1
3674 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3675 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3676 if (processor_alias_table[i].flags & PTA_SSE4_2
3677 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3678 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3679 if (processor_alias_table[i].flags & PTA_AVX
3680 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3681 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3682 if (processor_alias_table[i].flags & PTA_FMA
3683 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3684 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3685 if (processor_alias_table[i].flags & PTA_SSE4A
3686 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3687 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3688 if (processor_alias_table[i].flags & PTA_FMA4
3689 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3690 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3691 if (processor_alias_table[i].flags & PTA_XOP
3692 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3693 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3694 if (processor_alias_table[i].flags & PTA_LWP
3695 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3696 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3697 if (processor_alias_table[i].flags & PTA_ABM
3698 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3699 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3700 if (processor_alias_table[i].flags & PTA_BMI
3701 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI))
3702 ix86_isa_flags |= OPTION_MASK_ISA_BMI;
3703 if (processor_alias_table[i].flags & PTA_TBM
3704 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_TBM))
3705 ix86_isa_flags |= OPTION_MASK_ISA_TBM;
3706 if (processor_alias_table[i].flags & PTA_CX16
3707 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3708 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3709 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3710 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3711 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3712 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3713 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3714 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3715 if (processor_alias_table[i].flags & PTA_MOVBE
3716 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3717 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3718 if (processor_alias_table[i].flags & PTA_AES
3719 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3720 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3721 if (processor_alias_table[i].flags & PTA_PCLMUL
3722 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3723 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3724 if (processor_alias_table[i].flags & PTA_FSGSBASE
3725 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FSGSBASE))
3726 ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE;
3727 if (processor_alias_table[i].flags & PTA_RDRND
3728 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDRND))
3729 ix86_isa_flags |= OPTION_MASK_ISA_RDRND;
3730 if (processor_alias_table[i].flags & PTA_F16C
3731 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_F16C))
3732 ix86_isa_flags |= OPTION_MASK_ISA_F16C;
3733 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3734 x86_prefetch_sse = true;
3739 if (!strcmp (ix86_arch_string, "generic"))
3740 error ("generic CPU can be used only for %stune=%s %s",
3741 prefix, suffix, sw);
3742 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3743 error ("bad value (%s) for %sarch=%s %s",
3744 ix86_arch_string, prefix, suffix, sw);
3746 ix86_arch_mask = 1u << ix86_arch;
3747 for (i = 0; i < X86_ARCH_LAST; ++i)
3748 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3750 for (i = 0; i < pta_size; i++)
3751 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3753 ix86_schedule = processor_alias_table[i].schedule;
3754 ix86_tune = processor_alias_table[i].processor;
3757 if (!(processor_alias_table[i].flags & PTA_64BIT))
3759 if (ix86_tune_defaulted)
3761 ix86_tune_string = "x86-64";
3762 for (i = 0; i < pta_size; i++)
3763 if (! strcmp (ix86_tune_string,
3764 processor_alias_table[i].name))
3766 ix86_schedule = processor_alias_table[i].schedule;
3767 ix86_tune = processor_alias_table[i].processor;
3770 error ("CPU you selected does not support x86-64 "
3776 /* Adjust tuning when compiling for 32-bit ABI. */
3779 case PROCESSOR_GENERIC64:
3780 ix86_tune = PROCESSOR_GENERIC32;
3781 ix86_schedule = CPU_PENTIUMPRO;
3784 case PROCESSOR_CORE2_64:
3785 ix86_tune = PROCESSOR_CORE2_32;
3788 case PROCESSOR_COREI7_64:
3789 ix86_tune = PROCESSOR_COREI7_32;
3796 /* Intel CPUs have always interpreted SSE prefetch instructions as
3797 NOPs; so, we can enable SSE prefetch instructions even when
3798 -mtune (rather than -march) points us to a processor that has them.
3799 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3800 higher processors. */
3801 if (TARGET_CMOVE
3802 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3803 x86_prefetch_sse = true;
3807 if (ix86_tune_specified && i == pta_size)
3808 error ("bad value (%s) for %stune=%s %s",
3809 ix86_tune_string, prefix, suffix, sw);
3811 ix86_tune_mask = 1u << ix86_tune;
3812 for (i = 0; i < X86_TUNE_LAST; ++i)
3813 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3815 #ifndef USE_IX86_FRAME_POINTER
3816 #define USE_IX86_FRAME_POINTER 0
3819 #ifndef USE_X86_64_FRAME_POINTER
3820 #define USE_X86_64_FRAME_POINTER 0
3823 /* Set the default values for switches whose default depends on TARGET_64BIT
3824 in case they weren't overwritten by command line options. */
3827 if (optimize > 1 && !global_options_set.x_flag_zee)
3829 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3830 flag_omit_frame_pointer = !USE_X86_64_FRAME_POINTER;
3831 if (flag_asynchronous_unwind_tables == 2)
3832 flag_unwind_tables = flag_asynchronous_unwind_tables = 1;
3833 if (flag_pcc_struct_return == 2)
3834 flag_pcc_struct_return = 0;
3838 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3839 flag_omit_frame_pointer = !(USE_IX86_FRAME_POINTER || optimize_size);
3840 if (flag_asynchronous_unwind_tables == 2)
3841 flag_asynchronous_unwind_tables = !USE_IX86_FRAME_POINTER;
3842 if (flag_pcc_struct_return == 2)
3843 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
3847 ix86_cost = &ix86_size_cost;
3849 ix86_cost = processor_target_table[ix86_tune].cost;
3851 /* Arrange to set up i386_stack_locals for all functions. */
3852 init_machine_status = ix86_init_machine_status;
3854 /* Validate -mregparm= value. */
3855 if (ix86_regparm_string)
3858 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3859 i = atoi (ix86_regparm_string);
3860 if (i < 0 || i > REGPARM_MAX)
3861 error ("%sregparm=%d%s is not between 0 and %d",
3862 prefix, i, suffix, REGPARM_MAX);
3867 ix86_regparm = REGPARM_MAX;
3869 /* If the user has provided any of the -malign-* options,
3870 warn and use that value only if -falign-* is not set.
3871 Remove this code in GCC 3.2 or later. */
3872 if (ix86_align_loops_string)
3874 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3875 prefix, suffix, suffix);
3876 if (align_loops == 0)
3878 i = atoi (ix86_align_loops_string);
3879 if (i < 0 || i > MAX_CODE_ALIGN)
3880 error ("%salign-loops=%d%s is not between 0 and %d",
3881 prefix, i, suffix, MAX_CODE_ALIGN);
3883 align_loops = 1 << i;
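/* A worked example of the mapping above, assuming a hypothetical
   -malign-loops=4: the option value is an exponent, not a byte count,
   so align_loops becomes 1 << 4 = 16 bytes.  The same exponent
   encoding is used for -malign-jumps and -malign-functions below.  */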
3887 if (ix86_align_jumps_string)
3889 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3890 prefix, suffix, suffix);
3891 if (align_jumps == 0)
3893 i = atoi (ix86_align_jumps_string);
3894 if (i < 0 || i > MAX_CODE_ALIGN)
3895 error ("%salign-loops=%d%s is not between 0 and %d",
3896 prefix, i, suffix, MAX_CODE_ALIGN);
3898 align_jumps = 1 << i;
3902 if (ix86_align_funcs_string)
3904 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3905 prefix, suffix, suffix);
3906 if (align_functions == 0)
3908 i = atoi (ix86_align_funcs_string);
3909 if (i < 0 || i > MAX_CODE_ALIGN)
3910 error ("%salign-loops=%d%s is not between 0 and %d",
3911 prefix, i, suffix, MAX_CODE_ALIGN);
3913 align_functions = 1 << i;
3917 /* Default align_* from the processor table. */
3918 if (align_loops == 0)
3920 align_loops = processor_target_table[ix86_tune].align_loop;
3921 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3923 if (align_jumps == 0)
3925 align_jumps = processor_target_table[ix86_tune].align_jump;
3926 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3928 if (align_functions == 0)
3930 align_functions = processor_target_table[ix86_tune].align_func;
3933 /* Validate -mbranch-cost= value, or provide default. */
3934 ix86_branch_cost = ix86_cost->branch_cost;
3935 if (ix86_branch_cost_string)
3937 i = atoi (ix86_branch_cost_string);
3939 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3941 ix86_branch_cost = i;
3943 if (ix86_section_threshold_string)
3945 i = atoi (ix86_section_threshold_string);
3947 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3949 ix86_section_threshold = i;
3952 if (ix86_tls_dialect_string)
3954 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3955 ix86_tls_dialect = TLS_DIALECT_GNU;
3956 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3957 ix86_tls_dialect = TLS_DIALECT_GNU2;
3959 error ("bad value (%s) for %stls-dialect=%s %s",
3960 ix86_tls_dialect_string, prefix, suffix, sw);
3963 if (ix87_precision_string)
3965 i = atoi (ix87_precision_string);
3966 if (i != 32 && i != 64 && i != 80)
3967 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3972 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3974 /* Enable by default the SSE and MMX builtins. Do allow the user to
3975 explicitly disable any of these. In particular, disabling SSE and
3976 MMX for kernel code is extremely useful. */
3977 if (!ix86_arch_specified)
3979 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3980 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3983 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3987 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3989 if (!ix86_arch_specified)
3991 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3993 /* The i386 ABI does not specify a red zone. It still makes sense to use
3994 one when the programmer takes care to keep the stack from being destroyed. */
3995 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3996 target_flags |= MASK_NO_RED_ZONE;
3999 /* Keep nonleaf frame pointers. */
4000 if (flag_omit_frame_pointer)
4001 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
4002 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
4003 flag_omit_frame_pointer = 1;
4005 /* If we're doing fast math, we don't care about comparison order
4006 wrt NaNs. This lets us use a shorter comparison sequence. */
4007 if (flag_finite_math_only)
4008 target_flags &= ~MASK_IEEE_FP;
4010 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
4011 since the insns won't need emulation. */
4012 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
4013 target_flags &= ~MASK_NO_FANCY_MATH_387;
4015 /* Likewise, if the target doesn't have a 387, or we've specified
4016 software floating point, don't use 387 inline intrinsics. */
4018 target_flags |= MASK_NO_FANCY_MATH_387;
4020 /* Turn on MMX builtins for -msse. */
4023 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
4024 x86_prefetch_sse = true;
4027 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
4028 if (TARGET_SSE4_2 || TARGET_ABM)
4029 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
4031 /* Validate -mpreferred-stack-boundary= value or default it to
4032 PREFERRED_STACK_BOUNDARY_DEFAULT. */
4033 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
4034 if (ix86_preferred_stack_boundary_string)
4036 int min = (TARGET_64BIT ? 4 : 2);
4037 int max = (TARGET_SEH ? 4 : 12);
4039 i = atoi (ix86_preferred_stack_boundary_string);
4040 if (i < min || i > max)
4043 error ("%spreferred-stack-boundary%s is not supported "
4044 "for this target", prefix, suffix);
4046 error ("%spreferred-stack-boundary=%d%s is not between %d and %d",
4047 prefix, i, suffix, min, max);
4050 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
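/* A worked example of the computation above: the option value is an
   exponent in bytes, so i == 4 yields (1 << 4) * BITS_PER_UNIT
   = 16 * 8 = 128 bits, i.e. the 16-byte stack alignment the x86-64
   psABI requires; that requirement is why the minimum exponent is 4
   in 64-bit mode but only 2 (4 bytes) in 32-bit mode.  */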
4053 /* Set the default value for -mstackrealign. */
4054 if (ix86_force_align_arg_pointer == -1)
4055 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
4057 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
4059 /* Validate -mincoming-stack-boundary= value or default it to
4060 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
4061 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
4062 if (ix86_incoming_stack_boundary_string)
4064 i = atoi (ix86_incoming_stack_boundary_string);
4065 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
4066 error ("-mincoming-stack-boundary=%d is not between %d and 12",
4067 i, TARGET_64BIT ? 4 : 2);
4070 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
4071 ix86_incoming_stack_boundary
4072 = ix86_user_incoming_stack_boundary;
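/* The same exponent encoding applies to -mincoming-stack-boundary: a
   32-bit target may declare as little as (1 << 2) * BITS_PER_UNIT
   = 32 bits (4 bytes), the traditional ia32 ABI guarantee.  */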
4076 /* Accept -msseregparm only if at least SSE support is enabled. */
4077 if (TARGET_SSEREGPARM
4078 && ! TARGET_SSE)
4079 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
4081 ix86_fpmath = TARGET_FPMATH_DEFAULT;
4082 if (ix86_fpmath_string != 0)
4084 if (! strcmp (ix86_fpmath_string, "387"))
4085 ix86_fpmath = FPMATH_387;
4086 else if (! strcmp (ix86_fpmath_string, "sse"))
4090 warning (0, "SSE instruction set disabled, using 387 arithmetic");
4091 ix86_fpmath = FPMATH_387;
4094 ix86_fpmath = FPMATH_SSE;
4096 else if (! strcmp (ix86_fpmath_string, "387,sse")
4097 || ! strcmp (ix86_fpmath_string, "387+sse")
4098 || ! strcmp (ix86_fpmath_string, "sse,387")
4099 || ! strcmp (ix86_fpmath_string, "sse+387")
4100 || ! strcmp (ix86_fpmath_string, "both"))
4104 warning (0, "SSE instruction set disabled, using 387 arithmetic");
4105 ix86_fpmath = FPMATH_387;
4107 else if (!TARGET_80387)
4109 warning (0, "387 instruction set disabled, using SSE arithmetic");
4110 ix86_fpmath = FPMATH_SSE;
4113 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
4116 error ("bad value (%s) for %sfpmath=%s %s",
4117 ix86_fpmath_string, prefix, suffix, sw);
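/* A minimal usage sketch of the spellings accepted above; the
   function below is hypothetical and not part of the build.  Any of
   "387,sse", "sse,387", "387+sse", "sse+387" or "both" selects both
   units, so ix86_fpmath ends up as FPMATH_SSE | FPMATH_387.  */
#if 0
__attribute__((target ("fpmath=sse,387")))
double hypothetical_mixed_math (double x)
{
  return x * x;   /* may be compiled with either FP unit */
}
#endif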
4120 /* If the i387 is disabled, then do not return values in it. */
4121 if (!TARGET_80387)
4122 target_flags &= ~MASK_FLOAT_RETURNS;
4124 /* Use an external vectorized library when vectorizing intrinsics. */
4125 if (ix86_veclibabi_string)
4127 if (strcmp (ix86_veclibabi_string, "svml") == 0)
4128 ix86_veclib_handler = ix86_veclibabi_svml;
4129 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
4130 ix86_veclib_handler = ix86_veclibabi_acml;
4132 error ("unknown vectorization library ABI type (%s) for "
4133 "%sveclibabi=%s %s", ix86_veclibabi_string,
4134 prefix, suffix, sw);
4137 if ((!USE_IX86_FRAME_POINTER
4138 || (x86_accumulate_outgoing_args & ix86_tune_mask))
4139 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4141 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4143 /* ??? Unwind info is not correct around the CFG unless either a frame
4144 pointer is present or M_A_O_A is set. Fixing this requires rewriting
4145 unwind info generation to be aware of the CFG and propagating states
4146 around edges. */
4147 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
4148 || flag_exceptions || flag_non_call_exceptions)
4149 && flag_omit_frame_pointer
4150 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
4152 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4153 warning (0, "unwind tables currently require either a frame pointer "
4154 "or %saccumulate-outgoing-args%s for correctness",
4156 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4159 /* If stack probes are required, the space used for large function
4160 arguments on the stack must also be probed, so enable
4161 -maccumulate-outgoing-args so this happens in the prologue. */
4162 if (TARGET_STACK_PROBE
4163 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
4165 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
4166 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
4167 "for correctness", prefix, suffix);
4168 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
4171 /* For sane SSE instruction set generation we need fcomi instruction.
4172 It is safe to enable all CMOVE instructions. */
4176 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
4179 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
4180 p = strchr (internal_label_prefix, 'X');
4181 internal_label_prefix_len = p - internal_label_prefix;
4185 /* When the scheduling description is not available, disable the scheduler
4186 pass so it won't slow down compilation and make x87 code slower. */
4187 if (!TARGET_SCHEDULE)
4188 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
4190 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4191 ix86_cost->simultaneous_prefetches,
4192 global_options.x_param_values,
4193 global_options_set.x_param_values);
4194 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, ix86_cost->prefetch_block,
4195 global_options.x_param_values,
4196 global_options_set.x_param_values);
4197 maybe_set_param_value (PARAM_L1_CACHE_SIZE, ix86_cost->l1_cache_size,
4198 global_options.x_param_values,
4199 global_options_set.x_param_values);
4200 maybe_set_param_value (PARAM_L2_CACHE_SIZE, ix86_cost->l2_cache_size,
4201 global_options.x_param_values,
4202 global_options_set.x_param_values);
4204 /* Enable software prefetching at -O3 for CPUs where prefetching is beneficial. */
4205 if (flag_prefetch_loop_arrays < 0
4208 && software_prefetching_beneficial_p ())
4209 flag_prefetch_loop_arrays = 1;
4211 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
4212 can be optimized to ap = __builtin_next_arg (0). */
4213 if (!TARGET_64BIT && !flag_split_stack)
4214 targetm.expand_builtin_va_start = NULL;
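/* A minimal sketch of the transformation enabled above, assuming a
   32-bit target where va_list is a plain char pointer; the function
   is hypothetical and not part of the build.  */
#if 0
void hypothetical_vararg_fn (int first, ...)
{
  __builtin_va_list ap;
  __builtin_va_start (ap, first);
  /* With a flat char *va_list the expander can be bypassed and the
     call above reduces to a pointer assignment past the named
     arguments: ap = __builtin_next_arg (first);  */
}
#endif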
4218 ix86_gen_leave = gen_leave_rex64;
4219 ix86_gen_add3 = gen_adddi3;
4220 ix86_gen_sub3 = gen_subdi3;
4221 ix86_gen_sub3_carry = gen_subdi3_carry;
4222 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
4223 ix86_gen_monitor = gen_sse3_monitor64;
4224 ix86_gen_andsp = gen_anddi3;
4225 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_di;
4226 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
4227 ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
4231 ix86_gen_leave = gen_leave;
4232 ix86_gen_add3 = gen_addsi3;
4233 ix86_gen_sub3 = gen_subsi3;
4234 ix86_gen_sub3_carry = gen_subsi3_carry;
4235 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
4236 ix86_gen_monitor = gen_sse3_monitor;
4237 ix86_gen_andsp = gen_andsi3;
4238 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_si;
4239 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
4240 ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
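/* The ix86_gen_* hooks set above let word-size-independent code emit
   the right pattern without testing TARGET_64BIT at each use.  An
   illustrative call site (a sketch, not code from this file):  */
#if 0
/* Emits an addq in 64-bit mode and an addl in 32-bit mode.  */
emit_insn (ix86_gen_add3 (stack_pointer_rtx, stack_pointer_rtx,
                          GEN_INT (-UNITS_PER_WORD)));
#endif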
4244 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
4246 target_flags |= MASK_CLD & ~target_flags_explicit;
4249 if (!TARGET_64BIT && flag_pic)
4251 if (flag_fentry > 0)
4252 sorry ("-mfentry isn%'t supported for 32-bit in combination "
4253 "with -fpic");
4256 else if (TARGET_SEH)
4258 if (flag_fentry == 0)
4259 sorry ("-mno-fentry isn%'t compatible with SEH");
4262 else if (flag_fentry < 0)
4264 #if defined(PROFILE_BEFORE_PROLOGUE)
4271 /* Save the initial options in case the user uses function-specific options. */
4273 target_option_default_node = target_option_current_node
4274 = build_target_option_node ();
4278 /* When not optimizing for size, enable the vzeroupper optimization for
4279 TARGET_AVX with -fexpensive-optimizations and split 32-byte
4280 AVX unaligned loads/stores. */
4283 if (flag_expensive_optimizations
4284 && !(target_flags_explicit & MASK_VZEROUPPER))
4285 target_flags |= MASK_VZEROUPPER;
4286 if (!(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
4287 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
4288 if (!(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
4289 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
4294 /* Disable vzeroupper pass if TARGET_AVX is disabled. */
4295 target_flags &= ~MASK_VZEROUPPER;
4299 /* Return TRUE if VAL is passed in a register in one of the 256bit AVX modes. */
4302 function_pass_avx256_p (const_rtx val)
4307 if (REG_P (val) && VALID_AVX256_REG_MODE (GET_MODE (val)))
4310 if (GET_CODE (val) == PARALLEL)
4315 for (i = XVECLEN (val, 0) - 1; i >= 0; i--)
4317 r = XVECEXP (val, 0, i);
4318 if (GET_CODE (r) == EXPR_LIST
4320 && REG_P (XEXP (r, 0))
4321 && (GET_MODE (XEXP (r, 0)) == OImode
4322 || VALID_AVX256_REG_MODE (GET_MODE (XEXP (r, 0)))))
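/* Illustratively, the PARALLEL case above matches a return value of
   roughly this RTL shape, where V8SF satisfies VALID_AVX256_REG_MODE:
       (parallel [(expr_list (reg:V8SF xmm0) (const_int 0))])  */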
4330 /* Implement the TARGET_OPTION_OVERRIDE hook. */
4333 ix86_option_override (void)
4335 ix86_option_override_internal (true);
4338 /* Update register usage after having seen the compiler flags. */
4341 ix86_conditional_register_usage (void)
4346 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4348 if (fixed_regs[i] > 1)
4349 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
4350 if (call_used_regs[i] > 1)
4351 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
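/* A worked example of the encoding handled above: an entry of 2 in
   FIXED_REGISTERS means "fixed only in 32-bit mode" and an entry of 3
   means "fixed only in 64-bit mode", so on a 64-bit target a 2
   collapses to 0 and a 3 collapses to 1.  */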
4354 /* The PIC register, if it exists, is fixed. */
4355 j = PIC_OFFSET_TABLE_REGNUM;
4356 if (j != INVALID_REGNUM)
4357 fixed_regs[j] = call_used_regs[j] = 1;
4359 /* The 64-bit MS_ABI changes the set of call-used registers. */
4360 if (TARGET_64BIT_MS_ABI)
4362 call_used_regs[SI_REG] = 0;
4363 call_used_regs[DI_REG] = 0;
4364 call_used_regs[XMM6_REG] = 0;
4365 call_used_regs[XMM7_REG] = 0;
4366 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
4367 call_used_regs[i] = 0;
4370 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
4371 other call-clobbered regs for 64-bit. */
4374 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
4376 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4377 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
4378 && call_used_regs[i])
4379 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
4382 /* If MMX is disabled, squash the registers. */
4384 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4385 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
4386 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4388 /* If SSE is disabled, squash the registers. */
4390 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4391 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
4392 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4394 /* If the FPU is disabled, squash the registers. */
4395 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
4396 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4397 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
4398 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
4400 /* If 32-bit, squash the 64-bit registers. */
4403 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
4405 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
4411 /* Save the current options */
4414 ix86_function_specific_save (struct cl_target_option *ptr)
4416 ptr->arch = ix86_arch;
4417 ptr->schedule = ix86_schedule;
4418 ptr->tune = ix86_tune;
4419 ptr->fpmath = ix86_fpmath;
4420 ptr->branch_cost = ix86_branch_cost;
4421 ptr->tune_defaulted = ix86_tune_defaulted;
4422 ptr->arch_specified = ix86_arch_specified;
4423 ptr->x_ix86_isa_flags_explicit = ix86_isa_flags_explicit;
4424 ptr->ix86_target_flags_explicit = target_flags_explicit;
4426 /* The fields are char but the variables are not; make sure the
4427 values fit in the fields. */
4428 gcc_assert (ptr->arch == ix86_arch);
4429 gcc_assert (ptr->schedule == ix86_schedule);
4430 gcc_assert (ptr->tune == ix86_tune);
4431 gcc_assert (ptr->fpmath == ix86_fpmath);
4432 gcc_assert (ptr->branch_cost == ix86_branch_cost);
4435 /* Restore the current options */
4438 ix86_function_specific_restore (struct cl_target_option *ptr)
4440 enum processor_type old_tune = ix86_tune;
4441 enum processor_type old_arch = ix86_arch;
4442 unsigned int ix86_arch_mask, ix86_tune_mask;
4445 ix86_arch = (enum processor_type) ptr->arch;
4446 ix86_schedule = (enum attr_cpu) ptr->schedule;
4447 ix86_tune = (enum processor_type) ptr->tune;
4448 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
4449 ix86_branch_cost = ptr->branch_cost;
4450 ix86_tune_defaulted = ptr->tune_defaulted;
4451 ix86_arch_specified = ptr->arch_specified;
4452 ix86_isa_flags_explicit = ptr->x_ix86_isa_flags_explicit;
4453 target_flags_explicit = ptr->ix86_target_flags_explicit;
4455 /* Recreate the arch feature tests if the arch changed */
4456 if (old_arch != ix86_arch)
4458 ix86_arch_mask = 1u << ix86_arch;
4459 for (i = 0; i < X86_ARCH_LAST; ++i)
4460 ix86_arch_features[i]
4461 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
4464 /* Recreate the tune optimization tests */
4465 if (old_tune != ix86_tune)
4467 ix86_tune_mask = 1u << ix86_tune;
4468 for (i = 0; i < X86_TUNE_LAST; ++i)
4469 ix86_tune_features[i]
4470 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
4474 /* Print the current options */
4477 ix86_function_specific_print (FILE *file, int indent,
4478 struct cl_target_option *ptr)
4481 = ix86_target_string (ptr->x_ix86_isa_flags, ptr->x_target_flags,
4482 NULL, NULL, NULL, false);
4484 fprintf (file, "%*sarch = %d (%s)\n",
4487 ((ptr->arch < TARGET_CPU_DEFAULT_max)
4488 ? cpu_names[ptr->arch]
4491 fprintf (file, "%*stune = %d (%s)\n",
4494 ((ptr->tune < TARGET_CPU_DEFAULT_max)
4495 ? cpu_names[ptr->tune]
4498 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
4499 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
4500 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
4501 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
4505 fprintf (file, "%*s%s\n", indent, "", target_string);
4506 free (target_string);
4511 /* Inner function to process the attribute((target(...))): take an argument and
4512 set the current options from the argument. If we have a list, recursively go
4513 over the list. */
4516 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
4521 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
4522 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
4523 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
4524 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
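/* An illustrative expansion of the helper macros above (the length
   field is sizeof ("avx") - 1 == 3):
     IX86_ATTR_ISA ("avx", OPT_mavx)
       => { "avx", 3, ix86_opt_isa, OPT_mavx, 0 }  */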
4539 enum ix86_opt_type type;
4544 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
4545 IX86_ATTR_ISA ("abm", OPT_mabm),
4546 IX86_ATTR_ISA ("bmi", OPT_mbmi),
4547 IX86_ATTR_ISA ("tbm", OPT_mtbm),
4548 IX86_ATTR_ISA ("aes", OPT_maes),
4549 IX86_ATTR_ISA ("avx", OPT_mavx),
4550 IX86_ATTR_ISA ("mmx", OPT_mmmx),
4551 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
4552 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
4553 IX86_ATTR_ISA ("sse", OPT_msse),
4554 IX86_ATTR_ISA ("sse2", OPT_msse2),
4555 IX86_ATTR_ISA ("sse3", OPT_msse3),
4556 IX86_ATTR_ISA ("sse4", OPT_msse4),
4557 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
4558 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
4559 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
4560 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
4561 IX86_ATTR_ISA ("fma4", OPT_mfma4),
4562 IX86_ATTR_ISA ("xop", OPT_mxop),
4563 IX86_ATTR_ISA ("lwp", OPT_mlwp),
4564 IX86_ATTR_ISA ("fsgsbase", OPT_mfsgsbase),
4565 IX86_ATTR_ISA ("rdrnd", OPT_mrdrnd),
4566 IX86_ATTR_ISA ("f16c", OPT_mf16c),
4568 /* string options */
4569 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
4570 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
4571 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
4574 IX86_ATTR_YES ("cld",
4578 IX86_ATTR_NO ("fancy-math-387",
4579 OPT_mfancy_math_387,
4580 MASK_NO_FANCY_MATH_387),
4582 IX86_ATTR_YES ("ieee-fp",
4586 IX86_ATTR_YES ("inline-all-stringops",
4587 OPT_minline_all_stringops,
4588 MASK_INLINE_ALL_STRINGOPS),
4590 IX86_ATTR_YES ("inline-stringops-dynamically",
4591 OPT_minline_stringops_dynamically,
4592 MASK_INLINE_STRINGOPS_DYNAMICALLY),
4594 IX86_ATTR_NO ("align-stringops",
4595 OPT_mno_align_stringops,
4596 MASK_NO_ALIGN_STRINGOPS),
4598 IX86_ATTR_YES ("recip",
4604 /* If this is a list, recurse to get the options. */
4605 if (TREE_CODE (args) == TREE_LIST)
4609 for (; args; args = TREE_CHAIN (args))
4610 if (TREE_VALUE (args)
4611 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
4617 else if (TREE_CODE (args) != STRING_CST)
4620 /* Handle multiple arguments separated by commas. */
4621 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
4623 while (next_optstr && *next_optstr != '\0')
4625 char *p = next_optstr;
4627 char *comma = strchr (next_optstr, ',');
4628 const char *opt_string;
4629 size_t len, opt_len;
4634 enum ix86_opt_type type = ix86_opt_unknown;
4640 len = comma - next_optstr;
4641 next_optstr = comma + 1;
4649 /* Recognize no-xxx. */
4650 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
4659 /* Find the option. */
4662 for (i = 0; i < ARRAY_SIZE (attrs); i++)
4664 type = attrs[i].type;
4665 opt_len = attrs[i].len;
4666 if (ch == attrs[i].string[0]
4667 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
4668 && memcmp (p, attrs[i].string, opt_len) == 0)
4671 mask = attrs[i].mask;
4672 opt_string = attrs[i].string;
4677 /* Process the option. */
4680 error ("attribute(target(\"%s\")) is unknown", orig_p);
4684 else if (type == ix86_opt_isa)
4686 struct cl_decoded_option decoded;
4688 generate_option (opt, NULL, opt_set_p, CL_TARGET, &decoded);
4689 ix86_handle_option (&global_options, &global_options_set,
4690 &decoded, input_location);
4693 else if (type == ix86_opt_yes || type == ix86_opt_no)
4695 if (type == ix86_opt_no)
4696 opt_set_p = !opt_set_p;
4699 target_flags |= mask;
4701 target_flags &= ~mask;
4704 else if (type == ix86_opt_str)
4708 error ("option(\"%s\") was already specified", opt_string);
4712 p_strings[opt] = xstrdup (p + opt_len);
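/* A minimal sketch of the syntax parsed above; the function below is
   hypothetical and not part of the build.  Options are comma
   separated, a "no-" prefix negates ISA and yes/no options, and
   string options keep everything after the '='.  */
#if 0
__attribute__((target ("no-sse4.1,arch=core2,fpmath=sse")))
int hypothetical_tuned_fn (int x)
{
  return x + 1;
}
#endif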
4722 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
4725 ix86_valid_target_attribute_tree (tree args)
4727 const char *orig_arch_string = ix86_arch_string;
4728 const char *orig_tune_string = ix86_tune_string;
4729 const char *orig_fpmath_string = ix86_fpmath_string;
4730 int orig_tune_defaulted = ix86_tune_defaulted;
4731 int orig_arch_specified = ix86_arch_specified;
4732 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
4735 struct cl_target_option *def
4736 = TREE_TARGET_OPTION (target_option_default_node);
4738 /* Process each of the options on the chain. */
4739 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
4742 /* If the changed options are different from the default, rerun
4743 ix86_option_override_internal, and then save the options away.
4744 The string options are attribute options, and will be undone
4745 when we copy the save structure. */
4746 if (ix86_isa_flags != def->x_ix86_isa_flags
4747 || target_flags != def->x_target_flags
4748 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
4749 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
4750 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4752 /* If we are using the default tune= or arch=, undo the string assigned,
4753 and use the default. */
4754 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
4755 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
4756 else if (!orig_arch_specified)
4757 ix86_arch_string = NULL;
4759 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
4760 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
4761 else if (orig_tune_defaulted)
4762 ix86_tune_string = NULL;
4764 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
4765 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4766 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
4767 else if (!TARGET_64BIT && TARGET_SSE)
4768 ix86_fpmath_string = "sse,387";
4770 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
4771 ix86_option_override_internal (false);
4773 /* Add any builtin functions with the new isa if any. */
4774 ix86_add_new_builtins (ix86_isa_flags);
4776 /* Save the current options unless we are validating options for
4778 t = build_target_option_node ();
4780 ix86_arch_string = orig_arch_string;
4781 ix86_tune_string = orig_tune_string;
4782 ix86_fpmath_string = orig_fpmath_string;
4784 /* Free up memory allocated to hold the strings */
4785 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4786 free (option_strings[i]);
4792 /* Hook to validate attribute((target("string"))). */
4795 ix86_valid_target_attribute_p (tree fndecl,
4796 tree ARG_UNUSED (name),
4798 int ARG_UNUSED (flags))
4800 struct cl_target_option cur_target;
4802 tree old_optimize = build_optimization_node ();
4803 tree new_target, new_optimize;
4804 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4806 /* If the function changed the optimization levels as well as setting target
4807 options, start with the optimizations specified. */
4808 if (func_optimize && func_optimize != old_optimize)
4809 cl_optimization_restore (&global_options,
4810 TREE_OPTIMIZATION (func_optimize));
4812 /* The target attributes may also change some optimization flags, so update
4813 the optimization options if necessary. */
4814 cl_target_option_save (&cur_target, &global_options);
4815 new_target = ix86_valid_target_attribute_tree (args);
4816 new_optimize = build_optimization_node ();
4823 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4825 if (old_optimize != new_optimize)
4826 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4829 cl_target_option_restore (&global_options, &cur_target);
4831 if (old_optimize != new_optimize)
4832 cl_optimization_restore (&global_options,
4833 TREE_OPTIMIZATION (old_optimize));
4839 /* Hook to determine if one function can safely inline another. */
4842 ix86_can_inline_p (tree caller, tree callee)
4845 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4846 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4848 /* If callee has no option attributes, then it is ok to inline. */
4852 /* If caller has no option attributes, but callee does, then it is not ok
4853 to inline. */
4854 else if (!caller_tree)
4859 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4860 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4862 /* Callee's ISA options should be a subset of the caller's, i.e. an SSE4
4863 function can inline an SSE2 function but an SSE2 function can't inline
4864 an SSE4 function. */
4865 if ((caller_opts->x_ix86_isa_flags & callee_opts->x_ix86_isa_flags)
4866 != callee_opts->x_ix86_isa_flags)
4869 /* See if we have the same non-isa options. */
4870 else if (caller_opts->x_target_flags != callee_opts->x_target_flags)
4873 /* See if arch, tune, etc. are the same. */
4874 else if (caller_opts->arch != callee_opts->arch)
4877 else if (caller_opts->tune != callee_opts->tune)
4880 else if (caller_opts->fpmath != callee_opts->fpmath)
4883 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4894 /* Remember the last target of ix86_set_current_function. */
4895 static GTY(()) tree ix86_previous_fndecl;
4897 /* Establish appropriate back-end context for processing the function
4898 FNDECL. The argument might be NULL to indicate processing at top
4899 level, outside of any function scope. */
4901 ix86_set_current_function (tree fndecl)
4903 /* Only change the context if the function changes. This hook is called
4904 several times in the course of compiling a function, and we don't want to
4905 slow things down too much or call target_reinit when it isn't safe. */
4906 if (fndecl && fndecl != ix86_previous_fndecl)
4908 tree old_tree = (ix86_previous_fndecl
4909 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4912 tree new_tree = (fndecl
4913 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4916 ix86_previous_fndecl = fndecl;
4917 if (old_tree == new_tree)
4922 cl_target_option_restore (&global_options,
4923 TREE_TARGET_OPTION (new_tree));
4929 struct cl_target_option *def
4930 = TREE_TARGET_OPTION (target_option_current_node);
4932 cl_target_option_restore (&global_options, def);
4939 /* Return true if this goes in large data/bss. */
4942 ix86_in_large_data_p (tree exp)
4944 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4947 /* Functions are never large data. */
4948 if (TREE_CODE (exp) == FUNCTION_DECL)
4951 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4953 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4954 if (strcmp (section, ".ldata") == 0
4955 || strcmp (section, ".lbss") == 0)
4961 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4963 /* If this is an incomplete type with size 0, then we can't put it
4964 in data because it might be too big when completed. */
4965 if (!size || size > ix86_section_threshold)
4972 /* Switch to the appropriate section for output of DECL.
4973 DECL is either a `VAR_DECL' node or a constant of some sort.
4974 RELOC indicates whether forming the initial value of DECL requires
4975 link-time relocations. */
4977 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4981 x86_64_elf_select_section (tree decl, int reloc,
4982 unsigned HOST_WIDE_INT align)
4984 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4985 && ix86_in_large_data_p (decl))
4987 const char *sname = NULL;
4988 unsigned int flags = SECTION_WRITE;
4989 switch (categorize_decl_for_section (decl, reloc))
4994 case SECCAT_DATA_REL:
4995 sname = ".ldata.rel";
4997 case SECCAT_DATA_REL_LOCAL:
4998 sname = ".ldata.rel.local";
5000 case SECCAT_DATA_REL_RO:
5001 sname = ".ldata.rel.ro";
5003 case SECCAT_DATA_REL_RO_LOCAL:
5004 sname = ".ldata.rel.ro.local";
5008 flags |= SECTION_BSS;
5011 case SECCAT_RODATA_MERGE_STR:
5012 case SECCAT_RODATA_MERGE_STR_INIT:
5013 case SECCAT_RODATA_MERGE_CONST:
5017 case SECCAT_SRODATA:
5024 /* We don't split these for the medium model. Place them into
5025 default sections and hope for the best. */
5030 /* We might get called with string constants, but get_named_section
5031 doesn't like them as they are not DECLs. Also, we need to set
5032 flags in that case. */
5034 return get_section (sname, flags, NULL);
5035 return get_named_section (decl, sname, reloc);
5038 return default_elf_select_section (decl, reloc, align);
5041 /* Build up a unique section name, expressed as a
5042 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
5043 RELOC indicates whether the initial value of DECL requires
5044 link-time relocations. */
5046 static void ATTRIBUTE_UNUSED
5047 x86_64_elf_unique_section (tree decl, int reloc)
5049 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5050 && ix86_in_large_data_p (decl))
5052 const char *prefix = NULL;
5053 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
5054 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
5056 switch (categorize_decl_for_section (decl, reloc))
5059 case SECCAT_DATA_REL:
5060 case SECCAT_DATA_REL_LOCAL:
5061 case SECCAT_DATA_REL_RO:
5062 case SECCAT_DATA_REL_RO_LOCAL:
5063 prefix = one_only ? ".ld" : ".ldata";
5066 prefix = one_only ? ".lb" : ".lbss";
5069 case SECCAT_RODATA_MERGE_STR:
5070 case SECCAT_RODATA_MERGE_STR_INIT:
5071 case SECCAT_RODATA_MERGE_CONST:
5072 prefix = one_only ? ".lr" : ".lrodata";
5074 case SECCAT_SRODATA:
5081 /* We don't split these for the medium model. Place them into
5082 default sections and hope for the best. */
5087 const char *name, *linkonce;
5090 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
5091 name = targetm.strip_name_encoding (name);
5093 /* If we're using one_only, then there needs to be a .gnu.linkonce
5094 prefix to the section name. */
5095 linkonce = one_only ? ".gnu.linkonce" : "";
5097 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
5099 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
5103 default_unique_section (decl, reloc);
5106 #ifdef COMMON_ASM_OP
5107 /* This says how to output assembler code to declare an
5108 uninitialized external linkage data object.
5110 For medium model x86-64 we need to use the .largecomm directive for
5111 large objects. */
5113 x86_elf_aligned_common (FILE *file,
5114 const char *name, unsigned HOST_WIDE_INT size,
5117 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5118 && size > (unsigned int)ix86_section_threshold)
5119 fputs (".largecomm\t", file);
5121 fputs (COMMON_ASM_OP, file);
5122 assemble_name (file, name);
5123 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5124 size, align / BITS_PER_UNIT);
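/* Illustrative assembler output from the function above, with assumed
   names, sizes and alignments: an object bigger than the
   -mlarge-data-threshold under the medium code model gets
       .largecomm  big_array,65536,32
   while everything else takes the COMMON_ASM_OP path, e.g.
       .comm       small_var,4,4  */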
5128 /* Utility function for targets to use in implementing
5129 ASM_OUTPUT_ALIGNED_BSS. */
5132 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
5133 const char *name, unsigned HOST_WIDE_INT size,
5136 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
5137 && size > (unsigned int)ix86_section_threshold)
5138 switch_to_section (get_named_section (decl, ".lbss", 0));
5140 switch_to_section (bss_section);
5141 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
5142 #ifdef ASM_DECLARE_OBJECT_NAME
5143 last_assemble_variable_decl = decl;
5144 ASM_DECLARE_OBJECT_NAME (file, name, decl);
5146 /* The standard thing is to just output a label for the object. */
5147 ASM_OUTPUT_LABEL (file, name);
5148 #endif /* ASM_DECLARE_OBJECT_NAME */
5149 ASM_OUTPUT_SKIP (file, size ? size : 1);
5152 static const struct default_options ix86_option_optimization_table[] =
5154 /* Turn off -fschedule-insns by default. It tends to make the
5155 problem of too few registers even worse. */
5156 #ifdef INSN_SCHEDULING
5157 { OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 },
5160 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
5161 SUBTARGET_OPTIMIZATION_OPTIONS,
5163 { OPT_LEVELS_NONE, 0, NULL, 0 }
5166 /* Implement TARGET_OPTION_INIT_STRUCT. */
5169 ix86_option_init_struct (struct gcc_options *opts)
5172 /* The Darwin libraries never set errno, so we might as well
5173 avoid calling them when that's the only reason we would. */
5174 opts->x_flag_errno_math = 0;
5176 opts->x_flag_pcc_struct_return = 2;
5177 opts->x_flag_asynchronous_unwind_tables = 2;
5178 opts->x_flag_vect_cost_model = 1;
5181 /* Decide whether we must probe the stack before any space allocation
5182 on this target. It's essentially TARGET_STACK_PROBE except when
5183 -fstack-check causes the stack to be already probed differently. */
5186 ix86_target_stack_probe (void)
5188 /* Do not probe the stack twice if static stack checking is enabled. */
5189 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
5192 return TARGET_STACK_PROBE;
5195 /* Decide whether we can make a sibling call to a function. DECL is the
5196 declaration of the function being targeted by the call and EXP is the
5197 CALL_EXPR representing the call. */
5200 ix86_function_ok_for_sibcall (tree decl, tree exp)
5202 tree type, decl_or_type;
5205 /* If we are generating position-independent code, we cannot sibcall
5206 optimize any indirect call, or a direct call to a global function,
5207 as the PLT requires %ebx be live. (Darwin does not have a PLT.) */
5211 && (!decl || !targetm.binds_local_p (decl)))
5214 /* If we need to align the outgoing stack, then sibcalling would
5215 unalign the stack, which may break the called function. */
5216 if (ix86_minimum_incoming_stack_boundary (true)
5217 < PREFERRED_STACK_BOUNDARY)
5222 decl_or_type = decl;
5223 type = TREE_TYPE (decl);
5227 /* We're looking at the CALL_EXPR, we need the type of the function. */
5228 type = CALL_EXPR_FN (exp); /* pointer expression */
5229 type = TREE_TYPE (type); /* pointer type */
5230 type = TREE_TYPE (type); /* function type */
5231 decl_or_type = type;
5234 /* Check that the return value locations are the same. For example,
5235 if we are returning floats on the 80387 register stack, we cannot
5236 make a sibcall from a function that doesn't return a float to a
5237 function that does or, conversely, from a function that does return
5238 a float to a function that doesn't; the necessary stack adjustment
5239 would not be executed. This is also the place we notice
5240 differences in the return value ABI. Note that it is ok for one
5241 of the functions to have void return type as long as the return
5242 value of the other is passed in a register. */
5243 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
5244 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
5246 if (STACK_REG_P (a) || STACK_REG_P (b))
5248 if (!rtx_equal_p (a, b))
5251 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
5253 /* Disable sibcall if we need to generate vzeroupper after
5255 if (TARGET_VZEROUPPER
5256 && cfun->machine->callee_return_avx256_p
5257 && !cfun->machine->caller_return_avx256_p)
5260 else if (!rtx_equal_p (a, b))
5265 /* The SYSV ABI has more call-clobbered registers;
5266 disallow sibcalls from MS to SYSV. */
5267 if (cfun->machine->call_abi == MS_ABI
5268 && ix86_function_type_abi (type) == SYSV_ABI)
5273 /* If this call is indirect, we'll need to be able to use a
5274 call-clobbered register for the address of the target function.
5275 Make sure that all such registers are not used for passing
5276 parameters. Note that DLLIMPORT functions are indirect. */
5278 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
5280 if (ix86_function_regparm (type, NULL) >= 3)
5282 /* ??? Need to count the actual number of registers to be used,
5283 not the possible number of registers. Fix later. */
5289 /* Otherwise okay. That also includes certain types of indirect calls. */
5293 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
5294 and "sseregparm" calling convention attributes;
5295 arguments as in struct attribute_spec.handler. */
5298 ix86_handle_cconv_attribute (tree *node, tree name,
5300 int flags ATTRIBUTE_UNUSED,
5303 if (TREE_CODE (*node) != FUNCTION_TYPE
5304 && TREE_CODE (*node) != METHOD_TYPE
5305 && TREE_CODE (*node) != FIELD_DECL
5306 && TREE_CODE (*node) != TYPE_DECL)
5308 warning (OPT_Wattributes, "%qE attribute only applies to functions",
5310 *no_add_attrs = true;
5314 /* Can combine regparm with all attributes but fastcall and thiscall. */
5315 if (is_attribute_p ("regparm", name))
5319 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5321 error ("fastcall and regparm attributes are not compatible");
5324 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5326 error ("regparam and thiscall attributes are not compatible");
5329 cst = TREE_VALUE (args);
5330 if (TREE_CODE (cst) != INTEGER_CST)
5332 warning (OPT_Wattributes,
5333 "%qE attribute requires an integer constant argument",
5335 *no_add_attrs = true;
5337 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
5339 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
5341 *no_add_attrs = true;
5349 /* Do not warn when emulating the MS ABI. */
5350 if ((TREE_CODE (*node) != FUNCTION_TYPE
5351 && TREE_CODE (*node) != METHOD_TYPE)
5352 || ix86_function_type_abi (*node) != MS_ABI)
5353 warning (OPT_Wattributes, "%qE attribute ignored",
5355 *no_add_attrs = true;
5359 /* Can combine fastcall with sseregparm. */
5360 if (is_attribute_p ("fastcall", name))
5362 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5364 error ("fastcall and cdecl attributes are not compatible");
5366 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5368 error ("fastcall and stdcall attributes are not compatible");
5370 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
5372 error ("fastcall and regparm attributes are not compatible");
5374 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5376 error ("fastcall and thiscall attributes are not compatible");
5380 /* Can combine stdcall with regparm and sseregparm. */
5382 else if (is_attribute_p ("stdcall", name))
5384 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5386 error ("stdcall and cdecl attributes are not compatible");
5388 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5390 error ("stdcall and fastcall attributes are not compatible");
5392 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5394 error ("stdcall and thiscall attributes are not compatible");
5398 /* Can combine cdecl with regparm and sseregparm. */
5399 else if (is_attribute_p ("cdecl", name))
5401 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5403 error ("stdcall and cdecl attributes are not compatible");
5405 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5407 error ("fastcall and cdecl attributes are not compatible");
5409 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5411 error ("cdecl and thiscall attributes are not compatible");
5414 else if (is_attribute_p ("thiscall", name))
5416 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
5417 warning (OPT_Wattributes, "%qE attribute is used for a non-class method",
5419 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5421 error ("stdcall and thiscall attributes are not compatible");
5423 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5425 error ("fastcall and thiscall attributes are not compatible");
5427 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5429 error ("cdecl and thiscall attributes are not compatible");
5433 /* Can combine sseregparm with all attributes. */
5438 /* This function determines the calling convention from TYPE. */
5441 ix86_get_callcvt (const_tree type)
5443 unsigned int ret = 0;
5448 return IX86_CALLCVT_CDECL;
5450 attrs = TYPE_ATTRIBUTES (type);
5451 if (attrs != NULL_TREE)
5453 if (lookup_attribute ("cdecl", attrs))
5454 ret |= IX86_CALLCVT_CDECL;
5455 else if (lookup_attribute ("stdcall", attrs))
5456 ret |= IX86_CALLCVT_STDCALL;
5457 else if (lookup_attribute ("fastcall", attrs))
5458 ret |= IX86_CALLCVT_FASTCALL;
5459 else if (lookup_attribute ("thiscall", attrs))
5460 ret |= IX86_CALLCVT_THISCALL;
5462 /* Regparm isn't allowed for thiscall and fastcall. */
5463 if ((ret & (IX86_CALLCVT_THISCALL | IX86_CALLCVT_FASTCALL)) == 0)
5465 if (lookup_attribute ("regparm", attrs))
5466 ret |= IX86_CALLCVT_REGPARM;
5467 if (lookup_attribute ("sseregparm", attrs))
5468 ret |= IX86_CALLCVT_SSEREGPARM;
5471 if (IX86_BASE_CALLCVT(ret) != 0)
5475 is_stdarg = stdarg_p (type);
5476 if (TARGET_RTD && !is_stdarg)
5477 return IX86_CALLCVT_STDCALL | ret;
5481 || TREE_CODE (type) != METHOD_TYPE
5482 || ix86_function_type_abi (type) != MS_ABI)
5483 return IX86_CALLCVT_CDECL | ret;
5485 return IX86_CALLCVT_THISCALL;
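/* A minimal sketch of querying the result; the caller below is
   illustrative, not code from this file.  */
#if 0
unsigned int ccvt = ix86_get_callcvt (fntype);   /* fntype assumed */
if ((ccvt & IX86_CALLCVT_STDCALL) != 0)
  { /* callee pops its own stack arguments */ }
if ((ccvt & IX86_CALLCVT_REGPARM) != 0)
  { /* a regparm attribute is also present */ }
#endif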
5488 /* Return 0 if the attributes for two types are incompatible, 1 if they
5489 are compatible, and 2 if they are nearly compatible (which causes a
5490 warning to be generated). */
5493 ix86_comp_type_attributes (const_tree type1, const_tree type2)
5495 unsigned int ccvt1, ccvt2;
5497 if (TREE_CODE (type1) != FUNCTION_TYPE
5498 && TREE_CODE (type1) != METHOD_TYPE)
5501 ccvt1 = ix86_get_callcvt (type1);
5502 ccvt2 = ix86_get_callcvt (type2);
5505 if (ix86_function_regparm (type1, NULL)
5506 != ix86_function_regparm (type2, NULL))
5512 /* Return the regparm value for a function with the indicated TYPE and DECL.
5513 DECL may be NULL when calling the function indirectly
5514 or considering a libcall. */
5517 ix86_function_regparm (const_tree type, const_tree decl)
5524 return (ix86_function_type_abi (type) == SYSV_ABI
5525 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
5526 ccvt = ix86_get_callcvt (type);
5527 regparm = ix86_regparm;
5529 if ((ccvt & IX86_CALLCVT_REGPARM) != 0)
5531 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
5534 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
5538 else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
5540 else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
5543 /* Use register calling convention for local functions when possible. */
5545 && TREE_CODE (decl) == FUNCTION_DECL
5547 && !(profile_flag && !flag_fentry))
5549 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5550 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
5551 if (i && i->local && i->can_change_signature)
5553 int local_regparm, globals = 0, regno;
5555 /* Make sure no regparm register is taken by a
5556 fixed register variable. */
5557 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
5558 if (fixed_regs[local_regparm])
5561 /* We don't want to use regparm(3) for nested functions as
5562 these use a static chain pointer in the third argument. */
5563 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
5566 /* In 32-bit mode save a register for the split stack. */
5567 if (!TARGET_64BIT && local_regparm == 3 && flag_split_stack)
5570 /* Each fixed register usage increases register pressure,
5571 so fewer registers should be used for argument passing.
5572 This functionality can be overridden by an explicit
5573 regparm value. */
5574 for (regno = 0; regno <= DI_REG; regno++)
5575 if (fixed_regs[regno])
5579 = globals < local_regparm ? local_regparm - globals : 0;
5581 if (local_regparm > regparm)
5582 regparm = local_regparm;
5589 /* Return 1 or 2 if we can pass up to SSE_REGPARM_MAX SFmode (1) and
5590 DFmode (2) arguments in SSE registers for a function with the
5591 indicated TYPE and DECL. DECL may be NULL when calling the function
5592 indirectly or considering a libcall. Otherwise return 0. */
5595 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
5597 gcc_assert (!TARGET_64BIT);
5599 /* Use SSE registers to pass SFmode and DFmode arguments if requested
5600 by the sseregparm attribute. */
5601 if (TARGET_SSEREGPARM
5602 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
5609 error ("calling %qD with attribute sseregparm without "
5610 "SSE/SSE2 enabled", decl);
5612 error ("calling %qT with attribute sseregparm without "
5613 "SSE/SSE2 enabled", type);
5621 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
5622 (and DFmode for SSE2) arguments in SSE registers. */
5623 if (decl && TARGET_SSE_MATH && optimize
5624 && !(profile_flag && !flag_fentry))
5626 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5627 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
5628 if (i && i->local && i->can_change_signature)
5629 return TARGET_SSE2 ? 2 : 1;
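/* For illustration, with -msse2 a hypothetical declaration such as

     double __attribute__((sseregparm)) scale (double x);

   receives X in %xmm0 rather than on the stack, matching the return
   value 2 (SFmode and DFmode in SSE registers) computed above.  */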
5635 /* Return true if EAX is live at the start of the function. Used by
5636 ix86_expand_prologue to determine if we need special help before
5637 calling allocate_stack_worker. */
5640 ix86_eax_live_at_start_p (void)
5642 /* Cheat. Don't bother working forward from ix86_function_regparm
5643 to the function type to whether an actual argument is located in
5644 eax. Instead just look at cfg info, which is still close enough
5645 to correct at this point. This gives false positives for broken
5646 functions that might use uninitialized data that happens to be
5647 allocated in eax, but who cares? */
5648 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
5652 ix86_keep_aggregate_return_pointer (tree fntype)
5658 attr = lookup_attribute ("callee_pop_aggregate_return",
5659 TYPE_ATTRIBUTES (fntype));
5661 return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0);
5663 /* For 32-bit MS-ABI the default is to keep aggregate
5664 return pointer. */
5665 if (ix86_function_type_abi (fntype) == MS_ABI)
5668 return KEEP_AGGREGATE_RETURN_POINTER != 0;
5671 /* Value is the number of bytes of arguments automatically
5672 popped when returning from a subroutine call.
5673 FUNDECL is the declaration node of the function (as a tree),
5674 FUNTYPE is the data type of the function (as a tree),
5675 or for a library call it is an identifier node for the subroutine name.
5676 SIZE is the number of bytes of arguments passed on the stack.
5678 On the 80386, the RTD insn may be used to pop them if the number
5679 of args is fixed, but if the number is variable then the caller
5680 must pop them all. RTD can't be used for library calls now
5681 because the library is compiled with the Unix compiler.
5682 Use of RTD is a selectable option, since it is incompatible with
5683 standard Unix calling sequences. If the option is not selected,
5684 the caller must always pop the args.
5686 The attribute stdcall is equivalent to RTD on a per module basis. */
5689 ix86_return_pops_args (tree fundecl, tree funtype, int size)
5693 /* None of the 64-bit ABIs pop arguments. */
5697 ccvt = ix86_get_callcvt (funtype);
5699 if ((ccvt & (IX86_CALLCVT_STDCALL | IX86_CALLCVT_FASTCALL
5700 | IX86_CALLCVT_THISCALL)) != 0
5701 && ! stdarg_p (funtype))
5704 /* Lose any fake structure return argument if it is passed on the stack. */
5705 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
5706 && !ix86_keep_aggregate_return_pointer (funtype))
5708 int nregs = ix86_function_regparm (funtype, fundecl);
5710 return GET_MODE_SIZE (Pmode);
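/* A brief illustration: a 32-bit function declared as

     void __attribute__((stdcall)) f (int a, int b);

   has 8 bytes of fixed stack arguments and no ellipsis, so this
   returns 8 and the callee pops its own arguments, typically via a
   "ret $8" instruction.  */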
5716 /* Argument support functions. */
5718 /* Return true when a register may be used to pass function parameters. */
5720 ix86_function_arg_regno_p (int regno)
5723 const int *parm_regs;
5728 return (regno < REGPARM_MAX
5729 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
5731 return (regno < REGPARM_MAX
5732 || (TARGET_MMX && MMX_REGNO_P (regno)
5733 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
5734 || (TARGET_SSE && SSE_REGNO_P (regno)
5735 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
5740 if (SSE_REGNO_P (regno) && TARGET_SSE)
5745 if (TARGET_SSE && SSE_REGNO_P (regno)
5746 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
5750 /* TODO: The function should depend on the current function ABI, but
5751 builtins.c would need updating then. Therefore we use the
5752 default ABI. */
5754 /* RAX is used as a hidden argument to va_arg functions. */
5755 if (ix86_abi == SYSV_ABI && regno == AX_REG)
5758 if (ix86_abi == MS_ABI)
5759 parm_regs = x86_64_ms_abi_int_parameter_registers;
5761 parm_regs = x86_64_int_parameter_registers;
5762 for (i = 0; i < (ix86_abi == MS_ABI
5763 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
5764 if (regno == parm_regs[i])
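/* For reference, the 64-bit integer argument registers scanned above
   are %rdi, %rsi, %rdx, %rcx, %r8 and %r9 for the SysV ABI, and
   %rcx, %rdx, %r8 and %r9 for the MS ABI.  */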
5769 /* Return true if we do not know how to pass TYPE solely in registers. */
5772 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
5774 if (must_pass_in_stack_var_size_or_pad (mode, type))
5777 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
5778 The layout_type routine is crafty and tries to trick us into passing
5779 currently unsupported vector types on the stack by using TImode. */
5780 return (!TARGET_64BIT && mode == TImode
5781 && type && TREE_CODE (type) != VECTOR_TYPE);
5784 /* Return the size, in bytes, of the area reserved for arguments passed
5785 in registers for the function represented by FNDECL, dependent on the used
5786 ABI format. */
5788 ix86_reg_parm_stack_space (const_tree fndecl)
5790 enum calling_abi call_abi = SYSV_ABI;
5791 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
5792 call_abi = ix86_function_abi (fndecl);
5794 call_abi = ix86_function_type_abi (fndecl);
5795 if (TARGET_64BIT && call_abi == MS_ABI)
5800 /* Returns value SYSV_ABI or MS_ABI, dependent on FNTYPE, specifying the
5801 call ABI used. */
5803 ix86_function_type_abi (const_tree fntype)
5805 if (fntype != NULL_TREE && TYPE_ATTRIBUTES (fntype) != NULL_TREE)
5807 enum calling_abi abi = ix86_abi;
5808 if (abi == SYSV_ABI)
5810 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
5813 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
5821 ix86_function_ms_hook_prologue (const_tree fn)
5823 if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
5825 if (decl_function_context (fn) != NULL_TREE)
5826 error_at (DECL_SOURCE_LOCATION (fn),
5827 "ms_hook_prologue is not compatible with nested function");
5834 static enum calling_abi
5835 ix86_function_abi (const_tree fndecl)
5839 return ix86_function_type_abi (TREE_TYPE (fndecl));
5842 /* Returns value SYSV_ABI or MS_ABI, dependent on cfun, specifying the
5843 call ABI used. */
5845 ix86_cfun_abi (void)
5849 return cfun->machine->call_abi;
5852 /* Write the extra assembler code needed to declare a function properly. */
5855 ix86_asm_output_function_label (FILE *asm_out_file, const char *fname,
5858 bool is_ms_hook = ix86_function_ms_hook_prologue (decl);
5862 int i, filler_count = (TARGET_64BIT ? 32 : 16);
5863 unsigned int filler_cc = 0xcccccccc;
5865 for (i = 0; i < filler_count; i += 4)
5866 fprintf (asm_out_file, ASM_LONG " %#x\n", filler_cc);
5869 #ifdef SUBTARGET_ASM_UNWIND_INIT
5870 SUBTARGET_ASM_UNWIND_INIT (asm_out_file);
5873 ASM_OUTPUT_LABEL (asm_out_file, fname);
5875 /* Output magic byte marker, if hot-patch attribute is set. */
5880 /* leaq [%rsp + 0], %rsp */
5881 asm_fprintf (asm_out_file, ASM_BYTE
5882 "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n");
5886 /* movl.s %edi, %edi
5887 push %ebp
5888 movl.s %esp, %ebp */
5889 asm_fprintf (asm_out_file, ASM_BYTE
5890 "0x8b, 0xff, 0x55, 0x8b, 0xec\n");
5896 extern void init_regs (void);
5898 /* Implementation of the call ABI switching target hook. The call
5899 register sets specific to FNDECL are set up here. See also
5900 ix86_conditional_register_usage for more details. */
5902 ix86_call_abi_override (const_tree fndecl)
5904 if (fndecl == NULL_TREE)
5905 cfun->machine->call_abi = ix86_abi;
5907 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
5910 /* 64-bit MS and SYSV ABIs have different sets of call-used registers. Avoid
5911 expensive re-initialization of init_regs each time we switch function context
5912 since this is needed only during RTL expansion. */
5914 ix86_maybe_switch_abi (void)
5917 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5921 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5922 for a call to a function whose data type is FNTYPE.
5923 For a library call, FNTYPE is 0. */
5926 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5927 tree fntype, /* tree ptr for function decl */
5928 rtx libname, /* SYMBOL_REF of library name or 0 */
5932 struct cgraph_local_info *i;
5935 memset (cum, 0, sizeof (*cum));
5937 /* Initialize for the current callee. */
5940 cfun->machine->callee_pass_avx256_p = false;
5941 cfun->machine->callee_return_avx256_p = false;
5946 i = cgraph_local_info (fndecl);
5947 cum->call_abi = ix86_function_abi (fndecl);
5948 fnret_type = TREE_TYPE (TREE_TYPE (fndecl));
5953 cum->call_abi = ix86_function_type_abi (fntype);
5955 fnret_type = TREE_TYPE (fntype);
5960 if (TARGET_VZEROUPPER && fnret_type)
5962 rtx fnret_value = ix86_function_value (fnret_type, fntype,
5964 if (function_pass_avx256_p (fnret_value))
5966 /* The return value of this function uses 256bit AVX modes. */
5968 cfun->machine->callee_return_avx256_p = true;
5970 cfun->machine->caller_return_avx256_p = true;
5974 cum->caller = caller;
5976 /* Set up the number of registers to use for passing arguments. */
5978 if (TARGET_64BIT && cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5979 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5980 "or subtarget optimization implying it");
5981 cum->nregs = ix86_regparm;
5984 cum->nregs = (cum->call_abi == SYSV_ABI
5985 ? X86_64_REGPARM_MAX
5986 : X86_64_MS_REGPARM_MAX);
5990 cum->sse_nregs = SSE_REGPARM_MAX;
5993 cum->sse_nregs = (cum->call_abi == SYSV_ABI
5994 ? X86_64_SSE_REGPARM_MAX
5995 : X86_64_MS_SSE_REGPARM_MAX);
5999 cum->mmx_nregs = MMX_REGPARM_MAX;
6000 cum->warn_avx = true;
6001 cum->warn_sse = true;
6002 cum->warn_mmx = true;
6004 /* Because the type might mismatch between caller and callee, we need to
6005 use the actual type of the function for local calls.
6006 FIXME: cgraph_analyze can be told to actually record if function uses
6007 va_start so for local functions maybe_vaarg can be made aggressive
6009 FIXME: once the type system is fixed, we won't need this code anymore. */
6010 if (i && i->local && i->can_change_signature)
6011 fntype = TREE_TYPE (fndecl);
6012 cum->maybe_vaarg = (fntype
6013 ? (!prototype_p (fntype) || stdarg_p (fntype))
6018 /* If there are variable arguments, then we won't pass anything
6019 in registers in 32-bit mode. */
6020 if (stdarg_p (fntype))
6031 /* Use ecx and edx registers if the function has the fastcall attribute,
6032 else look for regparm information. */
6035 unsigned int ccvt = ix86_get_callcvt (fntype);
6036 if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
6039 cum->fastcall = 1; /* Same first register as in fastcall. */
6041 else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
6047 cum->nregs = ix86_function_regparm (fntype, fndecl);
6050 /* Set up the number of SSE registers used for passing SFmode
6051 and DFmode arguments. Warn for mismatching ABI. */
6052 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
6056 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
6057 But in the case of vector types, it is some vector mode.
6059 When we have only some of our vector isa extensions enabled, then there
6060 are some modes for which vector_mode_supported_p is false. For these
6061 modes, the generic vector support in gcc will choose some non-vector mode
6062 in order to implement the type. By computing the natural mode, we'll
6063 select the proper ABI location for the operand and not depend on whatever
6064 the middle-end decides to do with these vector types.
6066 The middle-end can't deal with vector types > 16 bytes. In this
6067 case, we return the original mode and warn of the ABI change if CUM isn't NULL. */
6070 static enum machine_mode
6071 type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
6073 enum machine_mode mode = TYPE_MODE (type);
6075 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
6077 HOST_WIDE_INT size = int_size_in_bytes (type);
6078 if ((size == 8 || size == 16 || size == 32)
6079 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
6080 && TYPE_VECTOR_SUBPARTS (type) > 1)
6082 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
6084 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6085 mode = MIN_MODE_VECTOR_FLOAT;
6087 mode = MIN_MODE_VECTOR_INT;
6089 /* Get the mode which has this inner mode and number of units. */
6090 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
6091 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
6092 && GET_MODE_INNER (mode) == innermode)
6094 if (size == 32 && !TARGET_AVX)
6096 static bool warnedavx;
6103 warning (0, "AVX vector argument without AVX "
6104 "enabled changes the ABI");
6106 return TYPE_MODE (type);
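/* For example, given a generic vector type such as

     typedef int v4si __attribute__((vector_size (16)));

   the loop above finds V4SImode (four SImode units), so the argument
   keeps a stable ABI location even when the vector ISA needed to
   support that mode isn't enabled.  */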
6119 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
6120 this may not agree with the mode that the type system has chosen for the
6121 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
6122 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
6125 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
6130 if (orig_mode != BLKmode)
6131 tmp = gen_rtx_REG (orig_mode, regno);
6134 tmp = gen_rtx_REG (mode, regno);
6135 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
6136 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
6142 /* x86-64 register passing implementation. See the x86-64 ABI for details. The goal
6143 of this code is to classify each 8 bytes of an incoming argument by the register
6144 class and assign registers accordingly. */
6146 /* Return the union class of CLASS1 and CLASS2.
6147 See the x86-64 PS ABI for details. */
6149 static enum x86_64_reg_class
6150 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
6152 /* Rule #1: If both classes are equal, this is the resulting class. */
6153 if (class1 == class2)
6156 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
6157 the other class. */
6158 if (class1 == X86_64_NO_CLASS)
6160 if (class2 == X86_64_NO_CLASS)
6163 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
6164 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
6165 return X86_64_MEMORY_CLASS;
6167 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
6168 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
6169 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
6170 return X86_64_INTEGERSI_CLASS;
6171 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
6172 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
6173 return X86_64_INTEGER_CLASS;
6175 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
6176 MEMORY is used. */
6177 if (class1 == X86_64_X87_CLASS
6178 || class1 == X86_64_X87UP_CLASS
6179 || class1 == X86_64_COMPLEX_X87_CLASS
6180 || class2 == X86_64_X87_CLASS
6181 || class2 == X86_64_X87UP_CLASS
6182 || class2 == X86_64_COMPLEX_X87_CLASS)
6183 return X86_64_MEMORY_CLASS;
6185 /* Rule #6: Otherwise class SSE is used. */
6186 return X86_64_SSE_CLASS;
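/* A short worked example of the rules above: INTEGERSI merged with
   SSESF yields INTEGERSI (rule #4), INTEGER merged with SSE yields
   INTEGER (rule #4), and X87 merged with SSE yields MEMORY
   (rule #5).  */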
6189 /* Classify the argument of type TYPE and mode MODE.
6190 CLASSES will be filled by the register class used to pass each word
6191 of the operand. The number of words is returned. In case the parameter
6192 should be passed in memory, 0 is returned. As a special case for zero
6193 sized containers, classes[0] will be NO_CLASS and 1 is returned.
6195 BIT_OFFSET is used internally for handling records and specifies the
6196 offset in bits modulo 256 to avoid overflow cases.
6198 See the x86-64 PS ABI for details.
6202 classify_argument (enum machine_mode mode, const_tree type,
6203 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
6205 HOST_WIDE_INT bytes =
6206 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
6207 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6209 /* Variable sized entities are always passed/returned in memory. */
6213 if (mode != VOIDmode
6214 && targetm.calls.must_pass_in_stack (mode, type))
6217 if (type && AGGREGATE_TYPE_P (type))
6221 enum x86_64_reg_class subclasses[MAX_CLASSES];
6223 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
6227 for (i = 0; i < words; i++)
6228 classes[i] = X86_64_NO_CLASS;
6230 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
6231 signal the memory class, so handle it as a special case. */
6234 classes[0] = X86_64_NO_CLASS;
6238 /* Classify each field of the record and merge classes. */
6239 switch (TREE_CODE (type))
6242 /* And now merge the fields of the structure. */
6243 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6245 if (TREE_CODE (field) == FIELD_DECL)
6249 if (TREE_TYPE (field) == error_mark_node)
6252 /* Bitfields are always classified as integer. Handle them
6253 early, since later code would consider them to be
6254 misaligned integers. */
6255 if (DECL_BIT_FIELD (field))
6257 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
6258 i < ((int_bit_position (field) + (bit_offset % 64))
6259 + tree_low_cst (DECL_SIZE (field), 0)
6262 merge_classes (X86_64_INTEGER_CLASS,
6269 type = TREE_TYPE (field);
6271 /* Flexible array member is ignored. */
6272 if (TYPE_MODE (type) == BLKmode
6273 && TREE_CODE (type) == ARRAY_TYPE
6274 && TYPE_SIZE (type) == NULL_TREE
6275 && TYPE_DOMAIN (type) != NULL_TREE
6276 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
6281 if (!warned && warn_psabi)
6284 inform (input_location,
6285 "the ABI of passing struct with"
6286 " a flexible array member has"
6287 " changed in GCC 4.4");
6291 num = classify_argument (TYPE_MODE (type), type,
6293 (int_bit_position (field)
6294 + bit_offset) % 256);
6297 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
6298 for (i = 0; i < num && (i + pos) < words; i++)
6300 merge_classes (subclasses[i], classes[i + pos]);
6307 /* Arrays are handled as small records. */
6310 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
6311 TREE_TYPE (type), subclasses, bit_offset);
6315 /* The partial classes are now full classes. */
6316 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
6317 subclasses[0] = X86_64_SSE_CLASS;
6318 if (subclasses[0] == X86_64_INTEGERSI_CLASS
6319 && !((bit_offset % 64) == 0 && bytes == 4))
6320 subclasses[0] = X86_64_INTEGER_CLASS;
6322 for (i = 0; i < words; i++)
6323 classes[i] = subclasses[i % num];
6328 case QUAL_UNION_TYPE:
6329 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
6331 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6333 if (TREE_CODE (field) == FIELD_DECL)
6337 if (TREE_TYPE (field) == error_mark_node)
6340 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
6341 TREE_TYPE (field), subclasses,
6345 for (i = 0; i < num; i++)
6346 classes[i] = merge_classes (subclasses[i], classes[i]);
6357 /* When size > 16 bytes, if the first one isn't
6358 X86_64_SSE_CLASS or any other ones aren't
6359 X86_64_SSEUP_CLASS, everything should be passed in
6360 memory. */
6361 if (classes[0] != X86_64_SSE_CLASS)
6364 for (i = 1; i < words; i++)
6365 if (classes[i] != X86_64_SSEUP_CLASS)
6369 /* Final merger cleanup. */
6370 for (i = 0; i < words; i++)
6372 /* If one class is MEMORY, everything should be passed in
6373 memory. */
6374 if (classes[i] == X86_64_MEMORY_CLASS)
6377 /* The X86_64_SSEUP_CLASS should always be preceded by
6378 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
6379 if (classes[i] == X86_64_SSEUP_CLASS
6380 && classes[i - 1] != X86_64_SSE_CLASS
6381 && classes[i - 1] != X86_64_SSEUP_CLASS)
6383 /* The first one should never be X86_64_SSEUP_CLASS. */
6384 gcc_assert (i != 0);
6385 classes[i] = X86_64_SSE_CLASS;
6388 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
6389 everything should be passed in memory. */
6390 if (classes[i] == X86_64_X87UP_CLASS
6391 && (classes[i - 1] != X86_64_X87_CLASS))
6395 /* The first one should never be X86_64_X87UP_CLASS. */
6396 gcc_assert (i != 0);
6397 if (!warned && warn_psabi)
6400 inform (input_location,
6401 "the ABI of passing union with long double"
6402 " has changed in GCC 4.4");
6410 /* Compute the alignment needed. We align all types to natural boundaries with
6411 the exception of XFmode, which is aligned to 64 bits. */
6412 if (mode != VOIDmode && mode != BLKmode)
6414 int mode_alignment = GET_MODE_BITSIZE (mode);
6417 mode_alignment = 128;
6418 else if (mode == XCmode)
6419 mode_alignment = 256;
6420 if (COMPLEX_MODE_P (mode))
6421 mode_alignment /= 2;
6422 /* Misaligned fields are always returned in memory. */
6423 if (bit_offset % mode_alignment)
6427 /* for V1xx modes, just use the base mode */
6428 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
6429 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
6430 mode = GET_MODE_INNER (mode);
6432 /* Classification of atomic types. */
6437 classes[0] = X86_64_SSE_CLASS;
6440 classes[0] = X86_64_SSE_CLASS;
6441 classes[1] = X86_64_SSEUP_CLASS;
6451 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
6455 classes[0] = X86_64_INTEGERSI_CLASS;
6458 else if (size <= 64)
6460 classes[0] = X86_64_INTEGER_CLASS;
6463 else if (size <= 64+32)
6465 classes[0] = X86_64_INTEGER_CLASS;
6466 classes[1] = X86_64_INTEGERSI_CLASS;
6469 else if (size <= 64+64)
6471 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6479 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6483 /* OImode shouldn't be used directly. */
6488 if (!(bit_offset % 64))
6489 classes[0] = X86_64_SSESF_CLASS;
6491 classes[0] = X86_64_SSE_CLASS;
6494 classes[0] = X86_64_SSEDF_CLASS;
6497 classes[0] = X86_64_X87_CLASS;
6498 classes[1] = X86_64_X87UP_CLASS;
6501 classes[0] = X86_64_SSE_CLASS;
6502 classes[1] = X86_64_SSEUP_CLASS;
6505 classes[0] = X86_64_SSE_CLASS;
6506 if (!(bit_offset % 64))
6512 if (!warned && warn_psabi)
6515 inform (input_location,
6516 "the ABI of passing structure with complex float"
6517 " member has changed in GCC 4.4");
6519 classes[1] = X86_64_SSESF_CLASS;
6523 classes[0] = X86_64_SSEDF_CLASS;
6524 classes[1] = X86_64_SSEDF_CLASS;
6527 classes[0] = X86_64_COMPLEX_X87_CLASS;
6530 /* This mode is larger than 16 bytes. */
6538 classes[0] = X86_64_SSE_CLASS;
6539 classes[1] = X86_64_SSEUP_CLASS;
6540 classes[2] = X86_64_SSEUP_CLASS;
6541 classes[3] = X86_64_SSEUP_CLASS;
6549 classes[0] = X86_64_SSE_CLASS;
6550 classes[1] = X86_64_SSEUP_CLASS;
6558 classes[0] = X86_64_SSE_CLASS;
6564 gcc_assert (VECTOR_MODE_P (mode));
6569 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
6571 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
6572 classes[0] = X86_64_INTEGERSI_CLASS;
6574 classes[0] = X86_64_INTEGER_CLASS;
6575 classes[1] = X86_64_INTEGER_CLASS;
6576 return 1 + (bytes > 8);
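/* As a worked example under the SysV x86-64 ABI, a hypothetical

     struct s { long l; double d; };

   spans two eightbytes; the code above classifies the first as
   X86_64_INTEGER_CLASS and the second as X86_64_SSEDF_CLASS, so L is
   passed in a general register and D in an SSE register.  */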
6580 /* Examine the argument and set the number of registers required in each
6581 class. Return 0 iff the parameter should be passed in memory. */
6583 examine_argument (enum machine_mode mode, const_tree type, int in_return,
6584 int *int_nregs, int *sse_nregs)
6586 enum x86_64_reg_class regclass[MAX_CLASSES];
6587 int n = classify_argument (mode, type, regclass, 0);
6593 for (n--; n >= 0; n--)
6594 switch (regclass[n])
6596 case X86_64_INTEGER_CLASS:
6597 case X86_64_INTEGERSI_CLASS:
6600 case X86_64_SSE_CLASS:
6601 case X86_64_SSESF_CLASS:
6602 case X86_64_SSEDF_CLASS:
6605 case X86_64_NO_CLASS:
6606 case X86_64_SSEUP_CLASS:
6608 case X86_64_X87_CLASS:
6609 case X86_64_X87UP_CLASS:
6613 case X86_64_COMPLEX_X87_CLASS:
6614 return in_return ? 2 : 0;
6615 case X86_64_MEMORY_CLASS:
6621 /* Construct container for the argument used by GCC interface. See
6622 FUNCTION_ARG for the detailed description. */
6625 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
6626 const_tree type, int in_return, int nintregs, int nsseregs,
6627 const int *intreg, int sse_regno)
6629 /* The following variables hold the static issued_error state. */
6630 static bool issued_sse_arg_error;
6631 static bool issued_sse_ret_error;
6632 static bool issued_x87_ret_error;
6634 enum machine_mode tmpmode;
6636 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
6637 enum x86_64_reg_class regclass[MAX_CLASSES];
6641 int needed_sseregs, needed_intregs;
6642 rtx exp[MAX_CLASSES];
6645 n = classify_argument (mode, type, regclass, 0);
6648 if (!examine_argument (mode, type, in_return, &needed_intregs,
6651 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
6654 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
6655 some less clueful developer tries to use floating-point anyway. */
6656 if (needed_sseregs && !TARGET_SSE)
6660 if (!issued_sse_ret_error)
6662 error ("SSE register return with SSE disabled");
6663 issued_sse_ret_error = true;
6666 else if (!issued_sse_arg_error)
6668 error ("SSE register argument with SSE disabled");
6669 issued_sse_arg_error = true;
6674 /* Likewise, error if the ABI requires us to return values in the
6675 x87 registers and the user specified -mno-80387. */
6676 if (!TARGET_80387 && in_return)
6677 for (i = 0; i < n; i++)
6678 if (regclass[i] == X86_64_X87_CLASS
6679 || regclass[i] == X86_64_X87UP_CLASS
6680 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
6682 if (!issued_x87_ret_error)
6684 error ("x87 register return with x87 disabled");
6685 issued_x87_ret_error = true;
6690 /* First construct simple cases. Avoid SCmode, since we want to use
6691 a single register to pass this type. */
6692 if (n == 1 && mode != SCmode)
6693 switch (regclass[0])
6695 case X86_64_INTEGER_CLASS:
6696 case X86_64_INTEGERSI_CLASS:
6697 return gen_rtx_REG (mode, intreg[0]);
6698 case X86_64_SSE_CLASS:
6699 case X86_64_SSESF_CLASS:
6700 case X86_64_SSEDF_CLASS:
6701 if (mode != BLKmode)
6702 return gen_reg_or_parallel (mode, orig_mode,
6703 SSE_REGNO (sse_regno));
6705 case X86_64_X87_CLASS:
6706 case X86_64_COMPLEX_X87_CLASS:
6707 return gen_rtx_REG (mode, FIRST_STACK_REG);
6708 case X86_64_NO_CLASS:
6709 /* Zero sized array, struct or class. */
6714 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
6715 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
6716 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6718 && regclass[0] == X86_64_SSE_CLASS
6719 && regclass[1] == X86_64_SSEUP_CLASS
6720 && regclass[2] == X86_64_SSEUP_CLASS
6721 && regclass[3] == X86_64_SSEUP_CLASS
6723 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6726 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
6727 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
6728 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
6729 && regclass[1] == X86_64_INTEGER_CLASS
6730 && (mode == CDImode || mode == TImode || mode == TFmode)
6731 && intreg[0] + 1 == intreg[1])
6732 return gen_rtx_REG (mode, intreg[0]);
6734 /* Otherwise figure out the entries of the PARALLEL. */
6735 for (i = 0; i < n; i++)
6739 switch (regclass[i])
6741 case X86_64_NO_CLASS:
6743 case X86_64_INTEGER_CLASS:
6744 case X86_64_INTEGERSI_CLASS:
6745 /* Merge TImodes on aligned occasions here too. */
6746 if (i * 8 + 8 > bytes)
6747 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
6748 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
6752 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
6753 if (tmpmode == BLKmode)
6755 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6756 gen_rtx_REG (tmpmode, *intreg),
6760 case X86_64_SSESF_CLASS:
6761 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6762 gen_rtx_REG (SFmode,
6763 SSE_REGNO (sse_regno)),
6767 case X86_64_SSEDF_CLASS:
6768 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6769 gen_rtx_REG (DFmode,
6770 SSE_REGNO (sse_regno)),
6774 case X86_64_SSE_CLASS:
6782 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
6792 && regclass[1] == X86_64_SSEUP_CLASS
6793 && regclass[2] == X86_64_SSEUP_CLASS
6794 && regclass[3] == X86_64_SSEUP_CLASS);
6801 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6802 gen_rtx_REG (tmpmode,
6803 SSE_REGNO (sse_regno)),
6812 /* Empty aligned struct, union or class. */
6816 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
6817 for (i = 0; i < nexps; i++)
6818 XVECEXP (ret, 0, i) = exp [i];
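/* For a struct { long l; double d; } argument (a hypothetical
   example), the PARALLEL built here looks roughly like

     (parallel [(expr_list (reg:DI di) (const_int 0))
                (expr_list (reg:DF xmm0) (const_int 8))])

   i.e. each element records a register and the byte offset of the
   piece of the argument it carries.  */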
6822 /* Update the data in CUM to advance over an argument of mode MODE
6823 and data type TYPE. (TYPE is null for libcalls where that information
6824 may not be available.) */
6827 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6828 const_tree type, HOST_WIDE_INT bytes,
6829 HOST_WIDE_INT words)
6845 cum->words += words;
6846 cum->nregs -= words;
6847 cum->regno += words;
6849 if (cum->nregs <= 0)
6857 /* OImode shouldn't be used directly. */
6861 if (cum->float_in_sse < 2)
6864 if (cum->float_in_sse < 1)
6881 if (!type || !AGGREGATE_TYPE_P (type))
6883 cum->sse_words += words;
6884 cum->sse_nregs -= 1;
6885 cum->sse_regno += 1;
6886 if (cum->sse_nregs <= 0)
6900 if (!type || !AGGREGATE_TYPE_P (type))
6902 cum->mmx_words += words;
6903 cum->mmx_nregs -= 1;
6904 cum->mmx_regno += 1;
6905 if (cum->mmx_nregs <= 0)
6916 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6917 const_tree type, HOST_WIDE_INT words, bool named)
6919 int int_nregs, sse_nregs;
6921 /* Unnamed 256bit vector mode parameters are passed on the stack. */
6922 if (!named && VALID_AVX256_REG_MODE (mode))
6925 if (examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
6926 && sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6928 cum->nregs -= int_nregs;
6929 cum->sse_nregs -= sse_nregs;
6930 cum->regno += int_nregs;
6931 cum->sse_regno += sse_nregs;
6935 int align = ix86_function_arg_boundary (mode, type) / BITS_PER_WORD;
6936 cum->words = (cum->words + align - 1) & ~(align - 1);
6937 cum->words += words;
6942 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6943 HOST_WIDE_INT words)
6945 /* Otherwise, this should be passed indirectly. */
6946 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6948 cum->words += words;
6956 /* Update the data in CUM to advance over an argument of mode MODE and
6957 data type TYPE. (TYPE is null for libcalls where that information
6958 may not be available.) */
6961 ix86_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6962 const_tree type, bool named)
6964 HOST_WIDE_INT bytes, words;
6966 if (mode == BLKmode)
6967 bytes = int_size_in_bytes (type);
6969 bytes = GET_MODE_SIZE (mode);
6970 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6973 mode = type_natural_mode (type, NULL);
6975 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6976 function_arg_advance_ms_64 (cum, bytes, words);
6977 else if (TARGET_64BIT)
6978 function_arg_advance_64 (cum, mode, type, words, named);
6980 function_arg_advance_32 (cum, mode, type, bytes, words);
6983 /* Define where to put the arguments to a function.
6984 Value is zero to push the argument on the stack,
6985 or a hard register in which to store the argument.
6987 MODE is the argument's machine mode.
6988 TYPE is the data type of the argument (as a tree).
6989 This is null for libcalls where that information may
6991 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6992 the preceding args and about the function being called.
6993 NAMED is nonzero if this argument is a named parameter
6994 (otherwise it is an extra parameter matching an ellipsis). */
6997 function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6998 enum machine_mode orig_mode, const_tree type,
6999 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
7001 static bool warnedsse, warnedmmx;
7003 /* Avoid the AL settings for the Unix64 ABI. */
7004 if (mode == VOIDmode)
7020 if (words <= cum->nregs)
7022 int regno = cum->regno;
7024 /* Fastcall allocates the first two DWORD (SImode) or
7025 smaller arguments to ECX and EDX if it isn't an aggregate type. */
7031 || (type && AGGREGATE_TYPE_P (type)))
7034 /* ECX, not EAX, is the first allocated register. */
7035 if (regno == AX_REG)
7038 return gen_rtx_REG (mode, regno);
7043 if (cum->float_in_sse < 2)
7046 if (cum->float_in_sse < 1)
7050 /* In 32bit, we pass TImode in xmm registers. */
7057 if (!type || !AGGREGATE_TYPE_P (type))
7059 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
7062 warning (0, "SSE vector argument without SSE enabled "
7066 return gen_reg_or_parallel (mode, orig_mode,
7067 cum->sse_regno + FIRST_SSE_REG);
7072 /* OImode shouldn't be used directly. */
7081 if (!type || !AGGREGATE_TYPE_P (type))
7084 return gen_reg_or_parallel (mode, orig_mode,
7085 cum->sse_regno + FIRST_SSE_REG);
7095 if (!type || !AGGREGATE_TYPE_P (type))
7097 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
7100 warning (0, "MMX vector argument without MMX enabled "
7104 return gen_reg_or_parallel (mode, orig_mode,
7105 cum->mmx_regno + FIRST_MMX_REG);
7114 function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7115 enum machine_mode orig_mode, const_tree type, bool named)
7117 /* Handle a hidden AL argument containing the number of registers
7118 for varargs x86-64 functions. */
7119 if (mode == VOIDmode)
7120 return GEN_INT (cum->maybe_vaarg
7121 ? (cum->sse_nregs < 0
7122 ? X86_64_SSE_REGPARM_MAX
7137 /* Unnamed 256bit vector mode parameters are passed on the stack. */
7143 return construct_container (mode, orig_mode, type, 0, cum->nregs,
7145 &x86_64_int_parameter_registers [cum->regno],
7150 function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7151 enum machine_mode orig_mode, bool named,
7152 HOST_WIDE_INT bytes)
7156 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
7157 We use a value of -2 to specify that the current function call is MSABI. */
7158 if (mode == VOIDmode)
7159 return GEN_INT (-2);
7161 /* If we've run out of registers, it goes on the stack. */
7162 if (cum->nregs == 0)
7165 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
7167 /* Only floating point modes are passed in anything but integer regs. */
7168 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
7171 regno = cum->regno + FIRST_SSE_REG;
7176 /* Unnamed floating parameters are passed in both the
7177 SSE and integer registers. */
7178 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
7179 t2 = gen_rtx_REG (mode, regno);
7180 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
7181 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
7182 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
7185 /* Handle aggregate types passed in a register. */
7186 if (orig_mode == BLKmode)
7188 if (bytes > 0 && bytes <= 8)
7189 mode = (bytes > 4 ? DImode : SImode);
7190 if (mode == BLKmode)
7194 return gen_reg_or_parallel (mode, orig_mode, regno);
7197 /* Return where to put the arguments to a function.
7198 Return zero to push the argument on the stack, or a hard register in which to store the argument.
7200 MODE is the argument's machine mode. TYPE is the data type of the
7201 argument. It is null for libcalls where that information may not be
7202 available. CUM gives information about the preceding args and about
7203 the function being called. NAMED is nonzero if this argument is a
7204 named parameter (otherwise it is an extra parameter matching an
7208 ix86_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
7209 const_tree type, bool named)
7211 enum machine_mode mode = omode;
7212 HOST_WIDE_INT bytes, words;
7215 if (mode == BLKmode)
7216 bytes = int_size_in_bytes (type);
7218 bytes = GET_MODE_SIZE (mode);
7219 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7221 /* To simplify the code below, represent vector types with a vector mode
7222 even if MMX/SSE are not active. */
7223 if (type && TREE_CODE (type) == VECTOR_TYPE)
7224 mode = type_natural_mode (type, cum);
7226 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
7227 arg = function_arg_ms_64 (cum, mode, omode, named, bytes);
7228 else if (TARGET_64BIT)
7229 arg = function_arg_64 (cum, mode, omode, type, named);
7231 arg = function_arg_32 (cum, mode, omode, type, bytes, words);
7233 if (TARGET_VZEROUPPER && function_pass_avx256_p (arg))
7235 /* This argument uses 256bit AVX modes. */
7237 cfun->machine->callee_pass_avx256_p = true;
7239 cfun->machine->caller_pass_avx256_p = true;
7245 /* A C expression that indicates when an argument must be passed by
7246 reference. If nonzero for an argument, a copy of that argument is
7247 made in memory and a pointer to the argument is passed instead of
7248 the argument itself. The pointer is passed in whatever way is
7249 appropriate for passing a pointer to that type. */
7252 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7253 enum machine_mode mode ATTRIBUTE_UNUSED,
7254 const_tree type, bool named ATTRIBUTE_UNUSED)
7256 /* See Windows x64 Software Convention. */
7257 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
7259 int msize = (int) GET_MODE_SIZE (mode);
7262 /* Arrays are passed by reference. */
7263 if (TREE_CODE (type) == ARRAY_TYPE)
7266 if (AGGREGATE_TYPE_P (type))
7268 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
7269 are passed by reference. */
7270 msize = int_size_in_bytes (type);
7274 /* __m128 is passed by reference. */
7276 case 1: case 2: case 4: case 8:
7282 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
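/* To illustrate the MS-ABI rule above: a 12-byte struct is not one of
   the permitted sizes (1, 2, 4 or 8 bytes), so the caller makes a
   temporary copy in memory and passes a pointer to it, while an
   8-byte struct is passed by value in a register.  */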
7288 /* Return true when TYPE should be 128bit aligned for 32bit argument
7289 passing ABI. XXX: This function is obsolete and is only used for
7290 checking psABI compatibility with previous versions of GCC. */
7293 ix86_compat_aligned_value_p (const_tree type)
7295 enum machine_mode mode = TYPE_MODE (type);
7296 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
7300 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
7302 if (TYPE_ALIGN (type) < 128)
7305 if (AGGREGATE_TYPE_P (type))
7307 /* Walk the aggregates recursively. */
7308 switch (TREE_CODE (type))
7312 case QUAL_UNION_TYPE:
7316 /* Walk all the structure fields. */
7317 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
7319 if (TREE_CODE (field) == FIELD_DECL
7320 && ix86_compat_aligned_value_p (TREE_TYPE (field)))
7327 /* Just for use if some languages pass arrays by value. */
7328 if (ix86_compat_aligned_value_p (TREE_TYPE (type)))
7339 /* Return the alignment boundary for MODE and TYPE with alignment ALIGN.
7340 XXX: This function is obsolete and is only used for checking psABI
7341 compatibility with previous versions of GCC. */
7344 ix86_compat_function_arg_boundary (enum machine_mode mode,
7345 const_tree type, unsigned int align)
7347 /* In 32bit, only _Decimal128 and __float128 are aligned to their
7348 natural boundaries. */
7349 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
7351 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
7352 make an exception for SSE modes since these require 128bit
7353 alignment.
7355 The handling here differs from field_alignment. ICC aligns MMX
7356 arguments to 4 byte boundaries, while structure fields are aligned
7357 to 8 byte boundaries. */
7360 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
7361 align = PARM_BOUNDARY;
7365 if (!ix86_compat_aligned_value_p (type))
7366 align = PARM_BOUNDARY;
7369 if (align > BIGGEST_ALIGNMENT)
7370 align = BIGGEST_ALIGNMENT;
7374 /* Return true when TYPE should be 128bit aligned for 32bit argument
7375 passing ABI. */
7378 ix86_contains_aligned_value_p (const_tree type)
7380 enum machine_mode mode = TYPE_MODE (type);
7382 if (mode == XFmode || mode == XCmode)
7385 if (TYPE_ALIGN (type) < 128)
7388 if (AGGREGATE_TYPE_P (type))
7390 /* Walk the aggregates recursively. */
7391 switch (TREE_CODE (type))
7395 case QUAL_UNION_TYPE:
7399 /* Walk all the structure fields. */
7400 for (field = TYPE_FIELDS (type);
7401 field;
7402 field = DECL_CHAIN (field))
7404 if (TREE_CODE (field) == FIELD_DECL
7405 && ix86_contains_aligned_value_p (TREE_TYPE (field)))
7412 /* Just for use if some languages pass arrays by value. */
7413 if (ix86_contains_aligned_value_p (TREE_TYPE (type)))
7422 return TYPE_ALIGN (type) >= 128;
7427 /* Gives the alignment boundary, in bits, of an argument with the
7428 specified mode and type. */
7431 ix86_function_arg_boundary (enum machine_mode mode, const_tree type)
7436 /* Since the main variant type is used for the call, we convert the
7437 type to its main variant. */
7438 type = TYPE_MAIN_VARIANT (type);
7439 align = TYPE_ALIGN (type);
7442 align = GET_MODE_ALIGNMENT (mode);
7443 if (align < PARM_BOUNDARY)
7444 align = PARM_BOUNDARY;
7448 unsigned int saved_align = align;
7452 /* i386 ABI defines XFmode arguments to be 4 byte aligned. */
7455 if (mode == XFmode || mode == XCmode)
7456 align = PARM_BOUNDARY;
7458 else if (!ix86_contains_aligned_value_p (type))
7459 align = PARM_BOUNDARY;
7462 align = PARM_BOUNDARY;
7467 && align != ix86_compat_function_arg_boundary (mode, type,
7471 inform (input_location,
7472 "The ABI for passing parameters with %d-byte"
7473 " alignment has changed in GCC 4.6",
7474 align / BITS_PER_UNIT);
7481 /* Return true if N is a possible register number of function value. */
7484 ix86_function_value_regno_p (const unsigned int regno)
7491 case FIRST_FLOAT_REG:
7492 /* TODO: The function should depend on the current function ABI, but
7493 builtins.c would need updating then. Therefore we use the
7494 default ABI. */
7495 if (TARGET_64BIT && ix86_abi == MS_ABI)
7497 return TARGET_FLOAT_RETURNS_IN_80387;
7503 if (TARGET_MACHO || TARGET_64BIT)
7511 /* Define how to find the value returned by a function.
7512 VALTYPE is the data type of the value (as a tree).
7513 If the precise function being called is known, FUNC is its FUNCTION_DECL;
7514 otherwise, FUNC is 0. */
7517 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
7518 const_tree fntype, const_tree fn)
7522 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
7523 we normally prevent this case when mmx is not available. However
7524 some ABIs may require the result to be returned like DImode. */
7525 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7526 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
7528 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
7529 we prevent this case when sse is not available. However some ABIs
7530 may require the result to be returned like integer TImode. */
7531 else if (mode == TImode
7532 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7533 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
7535 /* 32-byte vector modes in %ymm0. */
7536 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
7537 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
7539 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
7540 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
7541 regno = FIRST_FLOAT_REG;
7543 /* Most things go in %eax. */
7546 /* Override FP return register with %xmm0 for local functions when
7547 SSE math is enabled or for functions with sseregparm attribute. */
7548 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
7550 int sse_level = ix86_function_sseregparm (fntype, fn, false);
7551 if ((sse_level >= 1 && mode == SFmode)
7552 || (sse_level == 2 && mode == DFmode))
7553 regno = FIRST_SSE_REG;
7556 /* OImode shouldn't be used directly. */
7557 gcc_assert (mode != OImode);
7559 return gen_rtx_REG (orig_mode, regno);
7563 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
7568 /* Handle libcalls, which don't provide a type node. */
7569 if (valtype == NULL)
7581 return gen_rtx_REG (mode, FIRST_SSE_REG);
7584 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
7588 return gen_rtx_REG (mode, AX_REG);
7592 ret = construct_container (mode, orig_mode, valtype, 1,
7593 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
7594 x86_64_int_return_registers, 0);
7596 /* For zero sized structures, construct_container returns NULL, but we
7597 need to keep the rest of the compiler happy by returning a meaningful value. */
7599 ret = gen_rtx_REG (orig_mode, AX_REG);
7605 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
7607 unsigned int regno = AX_REG;
7611 switch (GET_MODE_SIZE (mode))
7614 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7615 && !COMPLEX_MODE_P (mode))
7616 regno = FIRST_SSE_REG;
7620 if (mode == SFmode || mode == DFmode)
7621 regno = FIRST_SSE_REG;
7627 return gen_rtx_REG (orig_mode, regno);
7631 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
7632 enum machine_mode orig_mode, enum machine_mode mode)
7634 const_tree fn, fntype;
7637 if (fntype_or_decl && DECL_P (fntype_or_decl))
7638 fn = fntype_or_decl;
7639 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
7641 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
7642 return function_value_ms_64 (orig_mode, mode);
7643 else if (TARGET_64BIT)
7644 return function_value_64 (orig_mode, mode, valtype);
7646 return function_value_32 (orig_mode, mode, fntype, fn);
7650 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
7651 bool outgoing ATTRIBUTE_UNUSED)
7653 enum machine_mode mode, orig_mode;
7655 orig_mode = TYPE_MODE (valtype);
7656 mode = type_natural_mode (valtype, NULL);
7657 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
7661 ix86_libcall_value (enum machine_mode mode)
7663 return ix86_function_value_1 (NULL, NULL, mode, mode);
7666 /* Return true iff type is returned in memory. */
7668 static bool ATTRIBUTE_UNUSED
7669 return_in_memory_32 (const_tree type, enum machine_mode mode)
7673 if (mode == BLKmode)
7676 size = int_size_in_bytes (type);
7678 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
7681 if (VECTOR_MODE_P (mode) || mode == TImode)
7683 /* User-created vectors small enough to fit in EAX. */
7687 /* MMX/3dNow values are returned in MM0,
7688 except when it doesn't exist or the ABI prescribes otherwise. */
7690 return !TARGET_MMX || TARGET_VECT8_RETURNS;
7692 /* SSE values are returned in XMM0, except when it doesn't exist. */
7696 /* AVX values are returned in YMM0, except when it doesn't exist. */
7707 /* OImode shouldn't be used directly. */
7708 gcc_assert (mode != OImode);
7713 static bool ATTRIBUTE_UNUSED
7714 return_in_memory_64 (const_tree type, enum machine_mode mode)
7716 int needed_intregs, needed_sseregs;
7717 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
7720 static bool ATTRIBUTE_UNUSED
7721 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
7723 HOST_WIDE_INT size = int_size_in_bytes (type);
7725 /* __m128 is returned in xmm0. */
7726 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7727 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
7730 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
7731 return size != 1 && size != 2 && size != 4 && size != 8;
7735 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7737 #ifdef SUBTARGET_RETURN_IN_MEMORY
7738 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
7740 const enum machine_mode mode = type_natural_mode (type, NULL);
7744 if (ix86_function_type_abi (fntype) == MS_ABI)
7745 return return_in_memory_ms_64 (type, mode);
7747 return return_in_memory_64 (type, mode);
7750 return return_in_memory_32 (type, mode);
7754 /* When returning SSE vector types, we have a choice of either
7755 (1) being abi incompatible with a -march switch, or
7756 (2) generating an error.
7757 Given no good solution, I think the safest thing is one warning.
7758 The user won't be able to use -Werror, but....
7760 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
7761 called in response to actually generating a caller or callee that
7762 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
7763 via aggregate_value_p for general type probing from tree-ssa. */
7766 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
7768 static bool warnedsse, warnedmmx;
7770 if (!TARGET_64BIT && type)
7772 /* Look at the return type of the function, not the function type. */
7773 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
7775 if (!TARGET_SSE && !warnedsse)
7778 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7781 warning (0, "SSE vector return without SSE enabled "
7786 if (!TARGET_MMX && !warnedmmx)
7788 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7791 warning (0, "MMX vector return without MMX enabled "
7801 /* Create the va_list data type. */
7803 /* Returns the calling convention specific va_list data type.
7804 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
7807 ix86_build_builtin_va_list_abi (enum calling_abi abi)
7809 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
7811 /* For i386 we use a plain pointer to the argument area. */
7812 if (!TARGET_64BIT || abi == MS_ABI)
7813 return build_pointer_type (char_type_node);
7815 record = lang_hooks.types.make_type (RECORD_TYPE);
7816 type_decl = build_decl (BUILTINS_LOCATION,
7817 TYPE_DECL, get_identifier ("__va_list_tag"), record);
7819 f_gpr = build_decl (BUILTINS_LOCATION,
7820 FIELD_DECL, get_identifier ("gp_offset"),
7821 unsigned_type_node);
7822 f_fpr = build_decl (BUILTINS_LOCATION,
7823 FIELD_DECL, get_identifier ("fp_offset"),
7824 unsigned_type_node);
7825 f_ovf = build_decl (BUILTINS_LOCATION,
7826 FIELD_DECL, get_identifier ("overflow_arg_area"),
7828 f_sav = build_decl (BUILTINS_LOCATION,
7829 FIELD_DECL, get_identifier ("reg_save_area"),
7832 va_list_gpr_counter_field = f_gpr;
7833 va_list_fpr_counter_field = f_fpr;
7835 DECL_FIELD_CONTEXT (f_gpr) = record;
7836 DECL_FIELD_CONTEXT (f_fpr) = record;
7837 DECL_FIELD_CONTEXT (f_ovf) = record;
7838 DECL_FIELD_CONTEXT (f_sav) = record;
7840 TYPE_STUB_DECL (record) = type_decl;
7841 TYPE_NAME (record) = type_decl;
7842 TYPE_FIELDS (record) = f_gpr;
7843 DECL_CHAIN (f_gpr) = f_fpr;
7844 DECL_CHAIN (f_fpr) = f_ovf;
7845 DECL_CHAIN (f_ovf) = f_sav;
7847 layout_type (record);
7849 /* The correct type is an array type of one element. */
7850 return build_array_type (record, build_index_type (size_zero_node));
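/* In C terms, the record built above matches the va_list layout
   described in the SysV x86-64 psABI:

     typedef struct {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } va_list[1];  */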
7853 /* Set up the builtin va_list data type and, for 64-bit, the additional
7854 calling convention specific va_list data types. */
7857 ix86_build_builtin_va_list (void)
7859 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
7861 /* Initialize abi specific va_list builtin types. */
7865 if (ix86_abi == MS_ABI)
7867 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
7868 if (TREE_CODE (t) != RECORD_TYPE)
7869 t = build_variant_type_copy (t);
7870 sysv_va_list_type_node = t;
7875 if (TREE_CODE (t) != RECORD_TYPE)
7876 t = build_variant_type_copy (t);
7877 sysv_va_list_type_node = t;
7879 if (ix86_abi != MS_ABI)
7881 t = ix86_build_builtin_va_list_abi (MS_ABI);
7882 if (TREE_CODE (t) != RECORD_TYPE)
7883 t = build_variant_type_copy (t);
7884 ms_va_list_type_node = t;
7889 if (TREE_CODE (t) != RECORD_TYPE)
7890 t = build_variant_type_copy (t);
7891 ms_va_list_type_node = t;
7898 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
7901 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
7907 /* GPR size of varargs save area. */
7908 if (cfun->va_list_gpr_size)
7909 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
7911 ix86_varargs_gpr_size = 0;
7913 /* FPR size of varargs save area. We don't need it if we don't pass
7914 anything in SSE registers. */
7915 if (TARGET_SSE && cfun->va_list_fpr_size)
7916 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
7918 ix86_varargs_fpr_size = 0;
7920 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
7923 save_area = frame_pointer_rtx;
7924 set = get_varargs_alias_set ();
7926 max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
7927 if (max > X86_64_REGPARM_MAX)
7928 max = X86_64_REGPARM_MAX;
7930 for (i = cum->regno; i < max; i++)
7932 mem = gen_rtx_MEM (Pmode,
7933 plus_constant (save_area, i * UNITS_PER_WORD));
7934 MEM_NOTRAP_P (mem) = 1;
7935 set_mem_alias_set (mem, set);
7936 emit_move_insn (mem, gen_rtx_REG (Pmode,
7937 x86_64_int_parameter_registers[i]));
7940 if (ix86_varargs_fpr_size)
7942 enum machine_mode smode;
7945 /* Now emit code to save SSE registers. The AX parameter contains the number
7946 of SSE parameter registers used to call this function, though all we
7947 actually check here is the zero/non-zero status. */
7949 label = gen_label_rtx ();
7950 test = gen_rtx_EQ (VOIDmode, gen_rtx_REG (QImode, AX_REG), const0_rtx);
7951 emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
7954 /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
7955 we used movdqa (i.e. TImode) instead? Perhaps even better would
7956 be if we could determine the real mode of the data, via a hook
7957 into pass_stdarg. Ignore all that for now. */
7959 if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
7960 crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);
7962 max = cum->sse_regno + cfun->va_list_fpr_size / 16;
7963 if (max > X86_64_SSE_REGPARM_MAX)
7964 max = X86_64_SSE_REGPARM_MAX;
7966 for (i = cum->sse_regno; i < max; ++i)
7968 mem = plus_constant (save_area, i * 16 + ix86_varargs_gpr_size);
7969 mem = gen_rtx_MEM (smode, mem);
7970 MEM_NOTRAP_P (mem) = 1;
7971 set_mem_alias_set (mem, set);
7972 set_mem_align (mem, GET_MODE_ALIGNMENT (smode));
7974 emit_move_insn (mem, gen_rtx_REG (smode, SSE_REGNO (i)));
7982 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
7984 alias_set_type set = get_varargs_alias_set ();
7987 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
7991 mem = gen_rtx_MEM (Pmode,
7992 plus_constant (virtual_incoming_args_rtx,
7993 i * UNITS_PER_WORD));
7994 MEM_NOTRAP_P (mem) = 1;
7995 set_mem_alias_set (mem, set);
7997 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
7998 emit_move_insn (mem, reg);
8003 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8004 tree type, int *pretend_size ATTRIBUTE_UNUSED,
8007 CUMULATIVE_ARGS next_cum;
8010 /* This argument doesn't appear to be used anymore. Which is good,
8011 because the old code here didn't suppress rtl generation. */
8012 gcc_assert (!no_rtl);
8017 fntype = TREE_TYPE (current_function_decl);
8019 /* For varargs, we do not want to skip the dummy va_dcl argument.
8020 For stdargs, we do want to skip the last named argument. */
8022 if (stdarg_p (fntype))
8023 ix86_function_arg_advance (&next_cum, mode, type, true);
8025 if (cum->call_abi == MS_ABI)
8026 setup_incoming_varargs_ms_64 (&next_cum);
8028 setup_incoming_varargs_64 (&next_cum);
8031 /* Check if TYPE is a va_list of kind char *. */
8034 is_va_list_char_pointer (tree type)
8038 /* For 32-bit it is always true. */
8041 canonic = ix86_canonical_va_list_type (type);
8042 return (canonic == ms_va_list_type_node
8043 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
8046 /* Implement va_start. */
8049 ix86_va_start (tree valist, rtx nextarg)
8051 HOST_WIDE_INT words, n_gpr, n_fpr;
8052 tree f_gpr, f_fpr, f_ovf, f_sav;
8053 tree gpr, fpr, ovf, sav, t;
8057 if (flag_split_stack
8058 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8060 unsigned int scratch_regno;
8062 /* When we are splitting the stack, we can't refer to the stack
8063 arguments using internal_arg_pointer, because they may be on
8064 the old stack. The split stack prologue will arrange to
8065 leave a pointer to the old stack arguments in a scratch
8066 register, which we here copy to a pseudo-register. The split
8067 stack prologue can't set the pseudo-register directly because
8068 it (the prologue) runs before any registers have been saved. */
8070 scratch_regno = split_stack_prologue_scratch_regno ();
8071 if (scratch_regno != INVALID_REGNUM)
8075 reg = gen_reg_rtx (Pmode);
8076 cfun->machine->split_stack_varargs_pointer = reg;
8079 emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
8083 push_topmost_sequence ();
8084 emit_insn_after (seq, entry_of_function ());
8085 pop_topmost_sequence ();
8089 /* Only the 64bit target needs something special. */
8090 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
8092 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8093 std_expand_builtin_va_start (valist, nextarg);
8098 va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
8099 next = expand_binop (ptr_mode, add_optab,
8100 cfun->machine->split_stack_varargs_pointer,
8101 crtl->args.arg_offset_rtx,
8102 NULL_RTX, 0, OPTAB_LIB_WIDEN);
8103 convert_move (va_r, next, 0);
8108 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
8109 f_fpr = DECL_CHAIN (f_gpr);
8110 f_ovf = DECL_CHAIN (f_fpr);
8111 f_sav = DECL_CHAIN (f_ovf);
8113 valist = build_simple_mem_ref (valist);
8114 TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
8115 /* The following should be folded into the MEM_REF offset. */
8116 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
8118 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
8120 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
8122 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
8125 /* Count number of gp and fp argument registers used. */
8126 words = crtl->args.info.words;
8127 n_gpr = crtl->args.info.regno;
8128 n_fpr = crtl->args.info.sse_regno;
8130 if (cfun->va_list_gpr_size)
8132 type = TREE_TYPE (gpr);
8133 t = build2 (MODIFY_EXPR, type,
8134 gpr, build_int_cst (type, n_gpr * 8));
8135 TREE_SIDE_EFFECTS (t) = 1;
8136 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8139 if (TARGET_SSE && cfun->va_list_fpr_size)
8141 type = TREE_TYPE (fpr);
8142 t = build2 (MODIFY_EXPR, type, fpr,
8143 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
8144 TREE_SIDE_EFFECTS (t) = 1;
8145 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8148 /* Find the overflow area. */
8149 type = TREE_TYPE (ovf);
8150 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
8151 ovf_rtx = crtl->args.internal_arg_pointer;
8153 ovf_rtx = cfun->machine->split_stack_varargs_pointer;
8154 t = make_tree (type, ovf_rtx);
8156 t = build2 (POINTER_PLUS_EXPR, type, t,
8157 size_int (words * UNITS_PER_WORD));
8158 t = build2 (MODIFY_EXPR, type, ovf, t);
8159 TREE_SIDE_EFFECTS (t) = 1;
8160 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8162 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
8164 /* Find the register save area.
8165 The function prologue saves it right above the stack frame. */
8166 type = TREE_TYPE (sav);
8167 t = make_tree (type, frame_pointer_rtx);
8168 if (!ix86_varargs_gpr_size)
8169 t = build2 (POINTER_PLUS_EXPR, type, t,
8170 size_int (-8 * X86_64_REGPARM_MAX));
8171 t = build2 (MODIFY_EXPR, type, sav, t);
8172 TREE_SIDE_EFFECTS (t) = 1;
8173 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
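/* Illustrative sketch (not part of GCC): the SysV AMD64 va_list that the
   code above initializes.  GPR counts bytes into the register save area
   (8 per GP register, at most 48 = 8 * X86_64_REGPARM_MAX), FPR starts
   past the GP area and advances 16 bytes per SSE register.  The struct
   below merely mirrors the psABI definition for reference.  */
#if 0
struct sysv_va_list_sketch
{
  unsigned int gp_offset;     /* n_gpr * 8; 48 means GP regs exhausted.  */
  unsigned int fp_offset;     /* 48 + n_fpr * 16; 176 means exhausted.  */
  void *overflow_arg_area;    /* Next stack argument (the "ovf" field).  */
  void *reg_save_area;        /* Base of the prologue's register dump.  */
};
#endif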
8177 /* Implement va_arg. */
8180 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8183 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
8184 tree f_gpr, f_fpr, f_ovf, f_sav;
8185 tree gpr, fpr, ovf, sav, t;
8187 tree lab_false, lab_over = NULL_TREE;
8192 enum machine_mode nat_mode;
8193 unsigned int arg_boundary;
8195 /* Only 64-bit targets need something special. */
8196 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
8197 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
8199 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
8200 f_fpr = DECL_CHAIN (f_gpr);
8201 f_ovf = DECL_CHAIN (f_fpr);
8202 f_sav = DECL_CHAIN (f_ovf);
8204 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
8205 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
8206 valist = build_va_arg_indirect_ref (valist);
8207 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8208 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8209 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8211 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
8213 type = build_pointer_type (type);
8214 size = int_size_in_bytes (type);
8215 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
8217 nat_mode = type_natural_mode (type, NULL);
8226 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
8227 if (!TARGET_64BIT_MS_ABI)
8234 container = construct_container (nat_mode, TYPE_MODE (type),
8235 type, 0, X86_64_REGPARM_MAX,
8236 X86_64_SSE_REGPARM_MAX, intreg,
8241 /* Pull the value out of the saved registers. */
8243 addr = create_tmp_var (ptr_type_node, "addr");
8247 int needed_intregs, needed_sseregs;
8249 tree int_addr, sse_addr;
8251 lab_false = create_artificial_label (UNKNOWN_LOCATION);
8252 lab_over = create_artificial_label (UNKNOWN_LOCATION);
8254 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
8256 need_temp = (!REG_P (container)
8257 && ((needed_intregs && TYPE_ALIGN (type) > 64)
8258 || TYPE_ALIGN (type) > 128));
8260 /* In case we are passing a structure, verify that it occupies a
8261 consecutive block in the register save area. If not, we need to do moves. */
8262 if (!need_temp && !REG_P (container))
8264 /* Verify that all registers are strictly consecutive. */
8265 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
8269 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
8271 rtx slot = XVECEXP (container, 0, i);
8272 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
8273 || INTVAL (XEXP (slot, 1)) != i * 16)
8281 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
8283 rtx slot = XVECEXP (container, 0, i);
8284 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
8285 || INTVAL (XEXP (slot, 1)) != i * 8)
8297 int_addr = create_tmp_var (ptr_type_node, "int_addr");
8298 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
8301 /* First ensure that we fit completely in registers. */
8304 t = build_int_cst (TREE_TYPE (gpr),
8305 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
8306 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
8307 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
8308 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
8309 gimplify_and_add (t, pre_p);
8313 t = build_int_cst (TREE_TYPE (fpr),
8314 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
8315 + X86_64_REGPARM_MAX * 8);
8316 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
8317 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
8318 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
8319 gimplify_and_add (t, pre_p);
8322 /* Compute index to start of area used for integer regs. */
8325 /* int_addr = gpr + sav; */
8326 t = fold_convert (sizetype, gpr);
8327 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
8328 gimplify_assign (int_addr, t, pre_p);
8332 /* sse_addr = fpr + sav; */
8333 t = fold_convert (sizetype, fpr);
8334 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
8335 gimplify_assign (sse_addr, t, pre_p);
8339 int i, prev_size = 0;
8340 tree temp = create_tmp_var (type, "va_arg_tmp");
8343 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
8344 gimplify_assign (addr, t, pre_p);
8346 for (i = 0; i < XVECLEN (container, 0); i++)
8348 rtx slot = XVECEXP (container, 0, i);
8349 rtx reg = XEXP (slot, 0);
8350 enum machine_mode mode = GET_MODE (reg);
8356 tree dest_addr, dest;
8357 int cur_size = GET_MODE_SIZE (mode);
8359 gcc_assert (prev_size <= INTVAL (XEXP (slot, 1)));
8360 prev_size = INTVAL (XEXP (slot, 1));
8361 if (prev_size + cur_size > size)
8363 cur_size = size - prev_size;
8364 mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
8365 if (mode == BLKmode)
8368 piece_type = lang_hooks.types.type_for_mode (mode, 1);
8369 if (mode == GET_MODE (reg))
8370 addr_type = build_pointer_type (piece_type);
8372 addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8374 daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8377 if (SSE_REGNO_P (REGNO (reg)))
8379 src_addr = sse_addr;
8380 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
8384 src_addr = int_addr;
8385 src_offset = REGNO (reg) * 8;
8387 src_addr = fold_convert (addr_type, src_addr);
8388 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
8389 size_int (src_offset));
8391 dest_addr = fold_convert (daddr_type, addr);
8392 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
8393 size_int (prev_size));
8394 if (cur_size == GET_MODE_SIZE (mode))
8396 src = build_va_arg_indirect_ref (src_addr);
8397 dest = build_va_arg_indirect_ref (dest_addr);
8399 gimplify_assign (dest, src, pre_p);
8404 = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
8405 3, dest_addr, src_addr,
8406 size_int (cur_size));
8407 gimplify_and_add (copy, pre_p);
8409 prev_size += cur_size;
8415 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
8416 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
8417 gimplify_assign (gpr, t, pre_p);
8422 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
8423 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
8424 gimplify_assign (fpr, t, pre_p);
8427 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8429 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
8432 /* ... otherwise out of the overflow area. */
8434 /* When we align a parameter on the stack for the caller, if its
8435 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
8436 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. Match the callee here
8437 with the caller. */
8438 arg_boundary = ix86_function_arg_boundary (VOIDmode, type);
8439 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
8440 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
8442 /* Care for on-stack alignment if needed. */
8443 if (arg_boundary <= 64 || size == 0)
8447 HOST_WIDE_INT align = arg_boundary / 8;
8448 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
8449 size_int (align - 1));
8450 t = fold_convert (sizetype, t);
8451 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
8453 t = fold_convert (TREE_TYPE (ovf), t);
8456 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8457 gimplify_assign (addr, t, pre_p);
8459 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
8460 size_int (rsize * UNITS_PER_WORD));
8461 gimplify_assign (unshare_expr (ovf), t, pre_p);
8464 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
8466 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
8467 addr = fold_convert (ptrtype, addr);
8470 addr = build_va_arg_indirect_ref (addr);
8471 return build_va_arg_indirect_ref (addr);
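/* Illustrative sketch (not part of GCC): the two rounding idioms used by
   ix86_gimplify_va_arg above.  RSIZE rounds the byte size up to whole
   words, and the overflow-area pointer is aligned upward with the usual
   add-then-mask trick, valid for any power-of-two ALIGN.  */
#if 0
static unsigned long
align_up_sketch (unsigned long x, unsigned long align)
{
  /* Mirrors t = (ovf + align - 1) & -align from the gimplified code.  */
  return (x + align - 1) & -align;
}
#endif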
8474 /* Return true if OPNUM's MEM should be matched
8475 in movabs* patterns. */
8478 ix86_check_movabs (rtx insn, int opnum)
8482 set = PATTERN (insn);
8483 if (GET_CODE (set) == PARALLEL)
8484 set = XVECEXP (set, 0, 0);
8485 gcc_assert (GET_CODE (set) == SET);
8486 mem = XEXP (set, opnum);
8487 while (GET_CODE (mem) == SUBREG)
8488 mem = SUBREG_REG (mem);
8489 gcc_assert (MEM_P (mem));
8490 return volatile_ok || !MEM_VOLATILE_P (mem);
8493 /* Initialize the table of extra 80387 mathematical constants. */
8496 init_ext_80387_constants (void)
8498 static const char * cst[5] =
8500 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
8501 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
8502 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
8503 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
8504 "3.1415926535897932385128089594061862044", /* 4: fldpi */
8508 for (i = 0; i < 5; i++)
8510 real_from_string (&ext_80387_constants_table[i], cst[i]);
8511 /* Ensure each constant is rounded to XFmode precision. */
8512 real_convert (&ext_80387_constants_table[i],
8513 XFmode, &ext_80387_constants_table[i]);
8516 ext_80387_constants_init = 1;
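/* Illustrative sketch (not part of GCC): the five strings above are
   high-precision decimal expansions of log10(2), ln(2), log2(e),
   log2(10) and pi, matching the x87 fldlg2, fldln2, fldl2e, fldl2t and
   fldpi instructions.  A standalone C99 cross-check could look like
   this.  */
#if 0
#include <math.h>
#include <stdio.h>

int
main (void)
{
  printf ("fldlg2 %.17g\n", log10 (2.0));      /* 0.30102999566398120  */
  printf ("fldln2 %.17g\n", log (2.0));        /* 0.69314718055994531  */
  printf ("fldl2e %.17g\n", 1.0 / log (2.0));  /* 1.4426950408889634   */
  printf ("fldl2t %.17g\n", log2 (10.0));      /* 3.3219280948873623   */
  printf ("fldpi  %.17g\n", acos (-1.0));      /* 3.1415926535897932   */
  return 0;
}
#endif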
8519 /* Return non-zero if the constant is something that
8520 can be loaded with a special instruction. */
8523 standard_80387_constant_p (rtx x)
8525 enum machine_mode mode = GET_MODE (x);
8529 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
8532 if (x == CONST0_RTX (mode))
8534 if (x == CONST1_RTX (mode))
8537 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8539 /* For XFmode constants, try to find a special 80387 instruction when
8540 optimizing for size or on those CPUs that benefit from them. */
8542 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
8546 if (! ext_80387_constants_init)
8547 init_ext_80387_constants ();
8549 for (i = 0; i < 5; i++)
8550 if (real_identical (&r, &ext_80387_constants_table[i]))
8554 /* A load of the constant -0.0 or -1.0 will be split into an
8555 fldz;fchs or fld1;fchs sequence. */
8556 if (real_isnegzero (&r))
8558 if (real_identical (&r, &dconstm1))
8564 /* Return the opcode of the special instruction to be used to load
8565 the constant X. */
8568 standard_80387_constant_opcode (rtx x)
8570 switch (standard_80387_constant_p (x))
8594 /* Return the CONST_DOUBLE representing the 80387 constant that is
8595 loaded by the specified special instruction. The argument IDX
8596 matches the return value from standard_80387_constant_p. */
8599 standard_80387_constant_rtx (int idx)
8603 if (! ext_80387_constants_init)
8604 init_ext_80387_constants ();
8620 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
8624 /* Return 1 if X is all 0s and 2 if X is all 1s
8625 in a supported SSE vector mode. */
8628 standard_sse_constant_p (rtx x)
8630 enum machine_mode mode = GET_MODE (x);
8632 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
8634 if (vector_all_ones_operand (x, mode))
8650 /* Return the opcode of the special instruction to be used to load
8651 the constant X. */
8654 standard_sse_constant_opcode (rtx insn, rtx x)
8656 switch (standard_sse_constant_p (x))
8659 switch (get_attr_mode (insn))
8662 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8664 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8665 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8667 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
8669 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8670 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
8672 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
8674 return "vxorps\t%x0, %x0, %x0";
8676 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8677 return "vxorps\t%x0, %x0, %x0";
8679 return "vxorpd\t%x0, %x0, %x0";
8681 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8682 return "vxorps\t%x0, %x0, %x0";
8684 return "vpxor\t%x0, %x0, %x0";
8689 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
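/* Illustrative sketch (not part of GCC): why the templates above are
   special.  XORing a register with itself yields all-zero bits and is a
   recognized dependency-breaking zeroing idiom, while PCMPEQD of a
   register with itself yields all-one bits; neither needs a constant
   pool load.  With SSE2 intrinsics the same idioms look like this.  */
#if 0
#include <emmintrin.h>

static __m128i
sse_zero_sketch (void)
{
  return _mm_setzero_si128 ();   /* Typically emits pxor %xmm0, %xmm0.  */
}

static __m128i
sse_all_ones_sketch (__m128i x)
{
  return _mm_cmpeq_epi32 (x, x); /* pcmpeqd %xmm0, %xmm0: all 1s.  */
}
#endif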
8696 /* Return true if OP contains a symbol reference. */
8699 symbolic_reference_mentioned_p (rtx op)
8704 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
8707 fmt = GET_RTX_FORMAT (GET_CODE (op));
8708 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
8714 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
8715 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
8719 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
8726 /* Return true if it is appropriate to emit `ret' instructions in the
8727 body of a function. Do this only if the epilogue is simple, needing a
8728 couple of insns. Prior to reloading, we can't tell how many registers
8729 must be saved, so return false then. Return false if there is no frame
8730 marker to de-allocate. */
8733 ix86_can_use_return_insn_p (void)
8735 struct ix86_frame frame;
8737 if (! reload_completed || frame_pointer_needed)
8740 /* Don't allow more than 32k pop, since that's all we can do
8741 with one instruction. */
8742 if (crtl->args.pops_args && crtl->args.size >= 32768)
8745 ix86_compute_frame_layout (&frame);
8746 return (frame.stack_pointer_offset == UNITS_PER_WORD
8747 && (frame.nregs + frame.nsseregs) == 0);
8750 /* Value should be nonzero if functions must have frame pointers.
8751 Zero means the frame pointer need not be set up (and parms may
8752 be accessed via the stack pointer) in functions that seem suitable. */
8755 ix86_frame_pointer_required (void)
8757 /* If we accessed previous frames, then the generated code expects
8758 to be able to access the saved ebp value in our frame. */
8759 if (cfun->machine->accesses_prev_frame)
8762 /* Several x86 OSes need a frame pointer for other reasons,
8763 usually pertaining to setjmp. */
8764 if (SUBTARGET_FRAME_POINTER_REQUIRED)
8767 /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
8768 turns off the frame pointer by default. Turn it back on now if
8769 we've not got a leaf function. */
8770 if (TARGET_OMIT_LEAF_FRAME_POINTER
8771 && (!current_function_is_leaf
8772 || ix86_current_function_calls_tls_descriptor))
8775 if (crtl->profile && !flag_fentry)
8781 /* Record that the current function accesses previous call frames. */
8784 ix86_setup_frame_addresses (void)
8786 cfun->machine->accesses_prev_frame = 1;
8789 #ifndef USE_HIDDEN_LINKONCE
8790 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
8791 # define USE_HIDDEN_LINKONCE 1
8793 # define USE_HIDDEN_LINKONCE 0
8797 static int pic_labels_used;
8799 /* Fills in the label name that should be used for a pc thunk for
8800 the given register. */
8803 get_pc_thunk_name (char name[32], unsigned int regno)
8805 gcc_assert (!TARGET_64BIT);
8807 if (USE_HIDDEN_LINKONCE)
8808 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
8810 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
8814 /* This function generates code for -fpic that loads %ebx with
8815 the return address of the caller and then returns. */
8818 ix86_code_end (void)
8823 for (regno = AX_REG; regno <= SP_REG; regno++)
8828 if (!(pic_labels_used & (1 << regno)))
8831 get_pc_thunk_name (name, regno);
8833 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
8834 get_identifier (name),
8835 build_function_type_list (void_type_node, NULL_TREE));
8836 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
8837 NULL_TREE, void_type_node);
8838 TREE_PUBLIC (decl) = 1;
8839 TREE_STATIC (decl) = 1;
8844 switch_to_section (darwin_sections[text_coal_section]);
8845 fputs ("\t.weak_definition\t", asm_out_file);
8846 assemble_name (asm_out_file, name);
8847 fputs ("\n\t.private_extern\t", asm_out_file);
8848 assemble_name (asm_out_file, name);
8849 putc ('\n', asm_out_file);
8850 ASM_OUTPUT_LABEL (asm_out_file, name);
8851 DECL_WEAK (decl) = 1;
8855 if (USE_HIDDEN_LINKONCE)
8857 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
8859 targetm.asm_out.unique_section (decl, 0);
8860 switch_to_section (get_named_section (decl, NULL, 0));
8862 targetm.asm_out.globalize_label (asm_out_file, name);
8863 fputs ("\t.hidden\t", asm_out_file);
8864 assemble_name (asm_out_file, name);
8865 putc ('\n', asm_out_file);
8866 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
8870 switch_to_section (text_section);
8871 ASM_OUTPUT_LABEL (asm_out_file, name);
8874 DECL_INITIAL (decl) = make_node (BLOCK);
8875 current_function_decl = decl;
8876 init_function_start (decl);
8877 first_function_block_is_cold = false;
8878 /* Make sure unwind info is emitted for the thunk if needed. */
8879 final_start_function (emit_barrier (), asm_out_file, 1);
8881 /* Pad stack IP move with 4 instructions (two NOPs count
8882 as one instruction). */
8883 if (TARGET_PAD_SHORT_FUNCTION)
8888 fputs ("\tnop\n", asm_out_file);
8891 xops[0] = gen_rtx_REG (Pmode, regno);
8892 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8893 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
8894 fputs ("\tret\n", asm_out_file);
8895 final_end_function ();
8896 init_insn_lengths ();
8897 free_after_compilation (cfun);
8899 current_function_decl = NULL;
8902 if (flag_split_stack)
8903 file_end_indicate_split_stack ();
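/* Illustrative sketch (not part of GCC): the pc thunk body emitted above
   for, say, %ebx boils down to

   __i686.get_pc_thunk.bx:
	movl	(%esp), %ebx	; return address = address after the call
	ret

   i.e. it hands the caller its own return address, which is how 32-bit
   PIC code discovers the current instruction pointer.  */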
8906 /* Emit code for the SET_GOT patterns. */
8909 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
8915 if (TARGET_VXWORKS_RTP && flag_pic)
8917 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
8918 xops[2] = gen_rtx_MEM (Pmode,
8919 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
8920 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
8922 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
8923 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
8924 an unadorned address. */
8925 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
8926 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
8927 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
8931 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
8933 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
8935 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
8938 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
8941 output_asm_insn ("call\t%a2", xops);
8942 #ifdef DWARF2_UNWIND_INFO
8943 /* The call to the next label acts as a push. */
8944 if (dwarf2out_do_frame ())
8948 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8949 gen_rtx_PLUS (Pmode,
8952 RTX_FRAME_RELATED_P (insn) = 1;
8953 dwarf2out_frame_debug (insn, true);
8960 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8961 is what will be referenced by the Mach-O PIC subsystem. */
8963 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8966 targetm.asm_out.internal_label (asm_out_file, "L",
8967 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
8971 output_asm_insn ("pop%z0\t%0", xops);
8972 #ifdef DWARF2_UNWIND_INFO
8973 /* The pop is a pop and clobbers dest, but doesn't restore it
8974 for unwind info purposes. */
8975 if (dwarf2out_do_frame ())
8979 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
8980 dwarf2out_frame_debug (insn, true);
8981 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8982 gen_rtx_PLUS (Pmode,
8985 RTX_FRAME_RELATED_P (insn) = 1;
8986 dwarf2out_frame_debug (insn, true);
8995 get_pc_thunk_name (name, REGNO (dest));
8996 pic_labels_used |= 1 << REGNO (dest);
8998 #ifdef DWARF2_UNWIND_INFO
8999 /* Ensure all queued register saves are flushed before the
9000 call. */
9001 if (dwarf2out_do_frame ())
9002 dwarf2out_flush_queued_reg_saves ();
9004 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
9005 xops[2] = gen_rtx_MEM (QImode, xops[2]);
9006 output_asm_insn ("call\t%X2", xops);
9007 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
9008 is what will be referenced by the Mach-O PIC subsystem. */
9011 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
9013 targetm.asm_out.internal_label (asm_out_file, "L",
9014 CODE_LABEL_NUMBER (label));
9021 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
9022 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
9024 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
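/* Illustrative sketch (not part of GCC): the two 32-bit PIC sequences the
   templates above can produce for loading the GOT address (label names
   are made up):

	call	.L2			; without deep branch prediction
   .L2:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+(.-.L2), %ebx

	call	__i686.get_pc_thunk.bx	; with deep branch prediction
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   The thunk variant keeps the CPU's return-stack predictor balanced,
   which a bare call/pop pair does not.  */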
9029 /* Generate a "push" pattern for input ARG. */
9034 struct machine_function *m = cfun->machine;
9036 if (m->fs.cfa_reg == stack_pointer_rtx)
9037 m->fs.cfa_offset += UNITS_PER_WORD;
9038 m->fs.sp_offset += UNITS_PER_WORD;
9040 return gen_rtx_SET (VOIDmode,
9042 gen_rtx_PRE_DEC (Pmode,
9043 stack_pointer_rtx)),
9047 /* Generate a "pop" pattern for input ARG. */
9052 return gen_rtx_SET (VOIDmode,
9055 gen_rtx_POST_INC (Pmode,
9056 stack_pointer_rtx)));
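/* Illustrative sketch (not part of GCC): in RTL template notation the two
   helpers above produce, for a word-sized ARG,

	(set (mem:P (pre_dec:P (reg:P sp))) arg)	; gen_push
	(set arg (mem:P (post_inc:P (reg:P sp))))	; gen_pop

   i.e. a push decrements the stack pointer before the store and a pop
   increments it after the load, matching the hardware instructions.  */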
9059 /* Return >= 0 if there is an unused call-clobbered register available
9060 for the entire function. */
9063 ix86_select_alt_pic_regnum (void)
9065 if (current_function_is_leaf
9067 && !ix86_current_function_calls_tls_descriptor)
9070 /* Can't use the same register for both PIC and DRAP. */
9072 drap = REGNO (crtl->drap_reg);
9075 for (i = 2; i >= 0; --i)
9076 if (i != drap && !df_regs_ever_live_p (i))
9080 return INVALID_REGNUM;
9083 /* Return 1 if we need to save REGNO. */
9085 ix86_save_reg (unsigned int regno, int maybe_eh_return)
9087 if (pic_offset_table_rtx
9088 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
9089 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
9091 || crtl->calls_eh_return
9092 || crtl->uses_const_pool))
9094 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
9099 if (crtl->calls_eh_return && maybe_eh_return)
9104 unsigned test = EH_RETURN_DATA_REGNO (i);
9105 if (test == INVALID_REGNUM)
9112 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
9115 return (df_regs_ever_live_p (regno)
9116 && !call_used_regs[regno]
9117 && !fixed_regs[regno]
9118 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
9121 /* Return the number of saved general-purpose registers. */
9124 ix86_nsaved_regs (void)
9129 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9130 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9135 /* Return the number of saved SSE registers. */
9138 ix86_nsaved_sseregs (void)
9143 if (!TARGET_64BIT_MS_ABI)
9145 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9146 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9151 /* Given FROM and TO register numbers, say whether this elimination is
9152 allowed. If stack alignment is needed, we can only replace argument
9153 pointer with hard frame pointer, or replace frame pointer with stack
9154 pointer. Otherwise, frame pointer elimination is automatically
9155 handled and all other eliminations are valid. */
9158 ix86_can_eliminate (const int from, const int to)
9160 if (stack_realign_fp)
9161 return ((from == ARG_POINTER_REGNUM
9162 && to == HARD_FRAME_POINTER_REGNUM)
9163 || (from == FRAME_POINTER_REGNUM
9164 && to == STACK_POINTER_REGNUM));
9166 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
9169 /* Return the offset between two registers, one to be eliminated, and the other
9170 its replacement, at the start of a routine. */
9173 ix86_initial_elimination_offset (int from, int to)
9175 struct ix86_frame frame;
9176 ix86_compute_frame_layout (&frame);
9178 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9179 return frame.hard_frame_pointer_offset;
9180 else if (from == FRAME_POINTER_REGNUM
9181 && to == HARD_FRAME_POINTER_REGNUM)
9182 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
9185 gcc_assert (to == STACK_POINTER_REGNUM);
9187 if (from == ARG_POINTER_REGNUM)
9188 return frame.stack_pointer_offset;
9190 gcc_assert (from == FRAME_POINTER_REGNUM);
9191 return frame.stack_pointer_offset - frame.frame_pointer_offset;
9195 /* In a dynamically-aligned function, we can't know the offset from
9196 stack pointer to frame pointer, so we must ensure that setjmp
9197 eliminates fp against the hard fp (%ebp) rather than trying to
9198 index from %esp up to the top of the frame across a gap that is
9199 of unknown (at compile-time) size. */
9201 ix86_builtin_setjmp_frame_value (void)
9203 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
9206 /* On the x86 -fsplit-stack and -fstack-protector both use the same
9207 field in the TCB, so they cannot be used together. */
9210 ix86_supports_split_stack (bool report ATTRIBUTE_UNUSED,
9211 struct gcc_options *opts ATTRIBUTE_UNUSED)
9215 #ifndef TARGET_THREAD_SPLIT_STACK_OFFSET
9217 error ("%<-fsplit-stack%> currently only supported on GNU/Linux");
9220 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
9223 error ("%<-fsplit-stack%> requires "
9224 "assembler support for CFI directives");
9232 /* When using -fsplit-stack, the allocation routines set a field in
9233 the TCB to the bottom of the stack plus this much space, measured
9234 in bytes. */
9236 #define SPLIT_STACK_AVAILABLE 256
9238 /* Fill the ix86_frame structure describing the frame of the currently compiled function. */
9241 ix86_compute_frame_layout (struct ix86_frame *frame)
9243 unsigned int stack_alignment_needed;
9244 HOST_WIDE_INT offset;
9245 unsigned int preferred_alignment;
9246 HOST_WIDE_INT size = get_frame_size ();
9247 HOST_WIDE_INT to_allocate;
9249 frame->nregs = ix86_nsaved_regs ();
9250 frame->nsseregs = ix86_nsaved_sseregs ();
9252 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
9253 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
9255 /* The 64-bit MS ABI seems to require stack alignment to always be 16,
9256 except in function prologues and leaf functions. */
9257 if ((TARGET_64BIT_MS_ABI && preferred_alignment < 16)
9258 && (!current_function_is_leaf || cfun->calls_alloca != 0
9259 || ix86_current_function_calls_tls_descriptor))
9261 preferred_alignment = 16;
9262 stack_alignment_needed = 16;
9263 crtl->preferred_stack_boundary = 128;
9264 crtl->stack_alignment_needed = 128;
9267 gcc_assert (!size || stack_alignment_needed);
9268 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
9269 gcc_assert (preferred_alignment <= stack_alignment_needed);
9271 /* For SEH we have to limit the amount of code movement into the prologue.
9272 At present we do this via a BLOCKAGE, at which point there's very little
9273 scheduling that can be done, which means that there's very little point
9274 in doing anything except PUSHs. */
9276 cfun->machine->use_fast_prologue_epilogue = false;
9278 /* During reload iteration the number of registers saved can change.
9279 Recompute the value as needed. Do not recompute when the number of registers
9280 didn't change, as reload does multiple calls to the function and does not
9281 expect the decision to change within a single iteration. */
9282 else if (!optimize_function_for_size_p (cfun)
9283 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
9285 int count = frame->nregs;
9286 struct cgraph_node *node = cgraph_get_node (current_function_decl);
9288 cfun->machine->use_fast_prologue_epilogue_nregs = count;
9290 /* The fast prologue uses move instead of push to save registers. This
9291 is significantly longer, but also executes faster as modern hardware
9292 can execute the moves in parallel, but can't do that for push/pop.
9294 Be careful about choosing what prologue to emit: when the function takes
9295 many instructions to execute, we may use the slow version, as well as
9296 when the function is known to be outside a hot spot (this is known with
9297 feedback only). Weight the size of the function by the number of registers
9298 to save, as it is cheap to use one or two push instructions but very
9299 slow to use many of them. */
9301 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
9302 if (node->frequency < NODE_FREQUENCY_NORMAL
9303 || (flag_branch_probabilities
9304 && node->frequency < NODE_FREQUENCY_HOT))
9305 cfun->machine->use_fast_prologue_epilogue = false;
9307 cfun->machine->use_fast_prologue_epilogue
9308 = !expensive_function_p (count);
9310 if (TARGET_PROLOGUE_USING_MOVE
9311 && cfun->machine->use_fast_prologue_epilogue)
9312 frame->save_regs_using_mov = true;
9314 frame->save_regs_using_mov = false;
9316 /* If static stack checking is enabled and done with probes, the registers
9317 need to be saved before allocating the frame. */
9318 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
9319 frame->save_regs_using_mov = false;
9321 /* Skip return address. */
9322 offset = UNITS_PER_WORD;
9324 /* Skip pushed static chain. */
9325 if (ix86_static_chain_on_stack)
9326 offset += UNITS_PER_WORD;
9328 /* Skip saved base pointer. */
9329 if (frame_pointer_needed)
9330 offset += UNITS_PER_WORD;
9331 frame->hfp_save_offset = offset;
9333 /* The traditional frame pointer location is at the top of the frame. */
9334 frame->hard_frame_pointer_offset = offset;
9336 /* Register save area */
9337 offset += frame->nregs * UNITS_PER_WORD;
9338 frame->reg_save_offset = offset;
9340 /* Align and set SSE register save area. */
9341 if (frame->nsseregs)
9343 /* The only ABI that has saved SSE registers (Win64) also has a
9344 16-byte aligned default stack, and thus we don't need to be
9345 within the re-aligned local stack frame to save them. */
9346 gcc_assert (INCOMING_STACK_BOUNDARY >= 128);
9347 offset = (offset + 16 - 1) & -16;
9348 offset += frame->nsseregs * 16;
9350 frame->sse_reg_save_offset = offset;
9352 /* The re-aligned stack starts here. Values before this point are not
9353 directly comparable with values below this point. In order to make
9354 sure that no value happens to be the same before and after, force
9355 the alignment computation below to add a non-zero value. */
9356 if (stack_realign_fp)
9357 offset = (offset + stack_alignment_needed) & -stack_alignment_needed;
9360 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
9361 offset += frame->va_arg_size;
9363 /* Align the start of the frame for the local function. */
9364 if (stack_realign_fp
9365 || offset != frame->sse_reg_save_offset
9367 || !current_function_is_leaf
9368 || cfun->calls_alloca
9369 || ix86_current_function_calls_tls_descriptor)
9370 offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;
9372 /* Frame pointer points here. */
9373 frame->frame_pointer_offset = offset;
9377 /* Add the outgoing arguments area. It can be skipped if we eliminated
9378 all the function calls as dead code.
9379 Skipping is however impossible when the function calls alloca. The
9380 alloca expander assumes that the last crtl->outgoing_args_size bytes
9381 of the stack frame are unused. */
9382 if (ACCUMULATE_OUTGOING_ARGS
9383 && (!current_function_is_leaf || cfun->calls_alloca
9384 || ix86_current_function_calls_tls_descriptor))
9386 offset += crtl->outgoing_args_size;
9387 frame->outgoing_arguments_size = crtl->outgoing_args_size;
9390 frame->outgoing_arguments_size = 0;
9392 /* Align stack boundary. Only needed if we're calling another function
9393 or using alloca. */
9394 if (!current_function_is_leaf || cfun->calls_alloca
9395 || ix86_current_function_calls_tls_descriptor)
9396 offset = (offset + preferred_alignment - 1) & -preferred_alignment;
9398 /* We've reached the end of the stack frame. */
9399 frame->stack_pointer_offset = offset;
9401 /* The size the prologue needs to allocate. */
9402 to_allocate = offset - frame->sse_reg_save_offset;
9404 if ((!to_allocate && frame->nregs <= 1)
9405 || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000))
9406 frame->save_regs_using_mov = false;
9408 if (ix86_using_red_zone ()
9409 && current_function_sp_is_unchanging
9410 && current_function_is_leaf
9411 && !ix86_current_function_calls_tls_descriptor)
9413 frame->red_zone_size = to_allocate;
9414 if (frame->save_regs_using_mov)
9415 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
9416 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
9417 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
9420 frame->red_zone_size = 0;
9421 frame->stack_pointer_offset -= frame->red_zone_size;
9423 /* The SEH frame pointer location is near the bottom of the frame.
9424 This is enforced by the fact that the difference between the
9425 stack pointer and the frame pointer is limited to 240 bytes in
9426 the unwind data structure. */
9431 /* If we can leave the frame pointer where it is, do so. */
9432 diff = frame->stack_pointer_offset - frame->hard_frame_pointer_offset;
9433 if (diff > 240 || (diff & 15) != 0)
9435 /* Ideally we'd determine what portion of the local stack frame
9436 (within the constraint of the lowest 240) is most heavily used.
9437 But without that complication, simply bias the frame pointer
9438 by 128 bytes so as to maximize the amount of the local stack
9439 frame that is addressable with 8-bit offsets. */
9440 frame->hard_frame_pointer_offset = frame->stack_pointer_offset - 128;
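/* Illustrative sketch (not part of GCC): how the offsets computed above
   might stack up for a simple 64-bit frame with a frame pointer, two
   saved GP registers, no SSE saves and 40 bytes of locals
   (UNITS_PER_WORD == 8, 16-byte alignment assumed; the code above is
   the authority, these numbers are only an example):

	offset  8	return address
	offset 16	saved %rbp	-> hard_frame_pointer_offset
	offset 32	two GP saves	-> reg_save_offset (already aligned)
	offset 32			-> frame_pointer_offset
	offset 72	+ 40 bytes of locals
	offset 80	aligned for calls -> stack_pointer_offset  */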
9445 /* This is semi-inlined memory_address_length, but simplified
9446 since we know that we're always dealing with reg+offset, and
9447 to avoid having to create and discard all that rtl. */
9450 choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
9456 /* EBP and R13 cannot be encoded without an offset. */
9457 len = (regno == BP_REG || regno == R13_REG);
9459 else if (IN_RANGE (offset, -128, 127))
9462 /* ESP and R12 must be encoded with a SIB byte. */
9463 if (regno == SP_REG || regno == R12_REG)
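/* Illustrative sketch (not part of GCC): the encoding lengths counted by
   choose_baseaddr_len above.  A zero displacement normally costs no
   extra bytes, but %ebp/%r13 can only be encoded with a displacement,
   and %esp/%r12 always need a SIB byte.  For example (32-bit):

	8b 04 24		mov (%esp), %eax	; SIB byte
	8b 45 00		mov 0(%ebp), %eax	; forced disp8
	8b 46 7f		mov 127(%esi), %eax	; disp8
	8b 86 80 00 00 00	mov 128(%esi), %eax	; disp32  */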
9469 /* Return an RTX that points to CFA_OFFSET within the stack frame.
9470 The valid base registers are taken from CFUN->MACHINE->FS. */
9473 choose_baseaddr (HOST_WIDE_INT cfa_offset)
9475 const struct machine_function *m = cfun->machine;
9476 rtx base_reg = NULL;
9477 HOST_WIDE_INT base_offset = 0;
9479 if (m->use_fast_prologue_epilogue)
9481 /* Choose the base register most likely to allow the most scheduling
9482 opportunities. Generally FP is valid throughout the function,
9483 while DRAP must be reloaded within the epilogue. But choose either
9484 over the SP due to increased encoding size. */
9488 base_reg = hard_frame_pointer_rtx;
9489 base_offset = m->fs.fp_offset - cfa_offset;
9491 else if (m->fs.drap_valid)
9493 base_reg = crtl->drap_reg;
9494 base_offset = 0 - cfa_offset;
9496 else if (m->fs.sp_valid)
9498 base_reg = stack_pointer_rtx;
9499 base_offset = m->fs.sp_offset - cfa_offset;
9504 HOST_WIDE_INT toffset;
9507 /* Choose the base register with the smallest address encoding.
9508 With a tie, choose FP > DRAP > SP. */
9511 base_reg = stack_pointer_rtx;
9512 base_offset = m->fs.sp_offset - cfa_offset;
9513 len = choose_baseaddr_len (STACK_POINTER_REGNUM, base_offset);
9515 if (m->fs.drap_valid)
9517 toffset = 0 - cfa_offset;
9518 tlen = choose_baseaddr_len (REGNO (crtl->drap_reg), toffset);
9521 base_reg = crtl->drap_reg;
9522 base_offset = toffset;
9528 toffset = m->fs.fp_offset - cfa_offset;
9529 tlen = choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM, toffset);
9532 base_reg = hard_frame_pointer_rtx;
9533 base_offset = toffset;
9538 gcc_assert (base_reg != NULL);
9540 return plus_constant (base_reg, base_offset);
9543 /* Emit code to save registers in the prologue. */
9546 ix86_emit_save_regs (void)
9551 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
9552 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9554 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
9555 RTX_FRAME_RELATED_P (insn) = 1;
9559 /* Emit a single register save at CFA - CFA_OFFSET. */
9562 ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
9563 HOST_WIDE_INT cfa_offset)
9565 struct machine_function *m = cfun->machine;
9566 rtx reg = gen_rtx_REG (mode, regno);
9567 rtx mem, addr, base, insn;
9569 addr = choose_baseaddr (cfa_offset);
9570 mem = gen_frame_mem (mode, addr);
9572 /* For SSE saves, we need to indicate the 128-bit alignment. */
9573 set_mem_align (mem, GET_MODE_ALIGNMENT (mode));
9575 insn = emit_move_insn (mem, reg);
9576 RTX_FRAME_RELATED_P (insn) = 1;
9579 if (GET_CODE (base) == PLUS)
9580 base = XEXP (base, 0);
9581 gcc_checking_assert (REG_P (base));
9583 /* When saving registers into a re-aligned local stack frame, avoid
9584 any tricky guessing by dwarf2out. */
9585 if (m->fs.realigned)
9587 gcc_checking_assert (stack_realign_drap);
9589 if (regno == REGNO (crtl->drap_reg))
9591 /* A bit of a hack. We force the DRAP register to be saved in
9592 the re-aligned stack frame, which provides us with a copy
9593 of the CFA that will last past the prologue. Install it. */
9594 gcc_checking_assert (cfun->machine->fs.fp_valid);
9595 addr = plus_constant (hard_frame_pointer_rtx,
9596 cfun->machine->fs.fp_offset - cfa_offset);
9597 mem = gen_rtx_MEM (mode, addr);
9598 add_reg_note (insn, REG_CFA_DEF_CFA, mem);
9602 /* The frame pointer is a stable reference within the
9603 aligned frame. Use it. */
9604 gcc_checking_assert (cfun->machine->fs.fp_valid);
9605 addr = plus_constant (hard_frame_pointer_rtx,
9606 cfun->machine->fs.fp_offset - cfa_offset);
9607 mem = gen_rtx_MEM (mode, addr);
9608 add_reg_note (insn, REG_CFA_EXPRESSION,
9609 gen_rtx_SET (VOIDmode, mem, reg));
9613 /* The memory may not be relative to the current CFA register,
9614 which means that we may need to generate a new pattern for
9615 use by the unwind info. */
9616 else if (base != m->fs.cfa_reg)
9618 addr = plus_constant (m->fs.cfa_reg, m->fs.cfa_offset - cfa_offset);
9619 mem = gen_rtx_MEM (mode, addr);
9620 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, mem, reg));
9624 /* Emit code to save registers using MOV insns.
9625 First register is stored at CFA - CFA_OFFSET. */
9627 ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
9631 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9632 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9634 ix86_emit_save_reg_using_mov (Pmode, regno, cfa_offset);
9635 cfa_offset -= UNITS_PER_WORD;
9639 /* Emit code to save SSE registers using MOV insns.
9640 First register is stored at CFA - CFA_OFFSET. */
9642 ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
9646 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9647 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9649 ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
9654 static GTY(()) rtx queued_cfa_restores;
9656 /* Add a REG_CFA_RESTORE REG note to INSN or queue it until the next stack
9657 manipulation insn. The value is on the stack at CFA - CFA_OFFSET.
9658 Don't add the note if the previously saved value will be left untouched
9659 within the stack red zone until return, as unwinders can find the same
9660 value in the register and on the stack. */
9663 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT cfa_offset)
9665 if (cfa_offset <= cfun->machine->fs.red_zone_offset)
9670 add_reg_note (insn, REG_CFA_RESTORE, reg);
9671 RTX_FRAME_RELATED_P (insn) = 1;
9675 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
9678 /* Add queued REG_CFA_RESTORE notes, if any, to INSN. */
9681 ix86_add_queued_cfa_restore_notes (rtx insn)
9684 if (!queued_cfa_restores)
9686 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
9688 XEXP (last, 1) = REG_NOTES (insn);
9689 REG_NOTES (insn) = queued_cfa_restores;
9690 queued_cfa_restores = NULL_RTX;
9691 RTX_FRAME_RELATED_P (insn) = 1;
9694 /* Expand prologue or epilogue stack adjustment.
9695 The pattern exists to put a dependency on all ebp-based memory accesses.
9696 STYLE should be negative if instructions should be marked as frame related,
9697 zero if %r11 register is live and cannot be freely used and positive
9698 otherwise. */
9701 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
9702 int style, bool set_cfa)
9704 struct machine_function *m = cfun->machine;
9706 bool add_frame_related_expr = false;
9709 insn = gen_pro_epilogue_adjust_stack_si_add (dest, src, offset);
9710 else if (x86_64_immediate_operand (offset, DImode))
9711 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, offset);
9715 /* r11 is used by indirect sibcall return as well; it is set before the
9716 epilogue and used after the epilogue. */
9718 tmp = gen_rtx_REG (DImode, R11_REG);
9721 gcc_assert (src != hard_frame_pointer_rtx
9722 && dest != hard_frame_pointer_rtx);
9723 tmp = hard_frame_pointer_rtx;
9725 insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
9727 add_frame_related_expr = true;
9729 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, tmp);
9732 insn = emit_insn (insn);
9734 ix86_add_queued_cfa_restore_notes (insn);
9740 gcc_assert (m->fs.cfa_reg == src);
9741 m->fs.cfa_offset += INTVAL (offset);
9742 m->fs.cfa_reg = dest;
9744 r = gen_rtx_PLUS (Pmode, src, offset);
9745 r = gen_rtx_SET (VOIDmode, dest, r);
9746 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9747 RTX_FRAME_RELATED_P (insn) = 1;
9751 RTX_FRAME_RELATED_P (insn) = 1;
9752 if (add_frame_related_expr)
9754 rtx r = gen_rtx_PLUS (Pmode, src, offset);
9755 r = gen_rtx_SET (VOIDmode, dest, r);
9756 add_reg_note (insn, REG_FRAME_RELATED_EXPR, r);
9760 if (dest == stack_pointer_rtx)
9762 HOST_WIDE_INT ooffset = m->fs.sp_offset;
9763 bool valid = m->fs.sp_valid;
9765 if (src == hard_frame_pointer_rtx)
9767 valid = m->fs.fp_valid;
9768 ooffset = m->fs.fp_offset;
9770 else if (src == crtl->drap_reg)
9772 valid = m->fs.drap_valid;
9777 /* Else there are two possibilities: SP itself, which we set
9778 up as the default above. Or EH_RETURN_STACKADJ_RTX, which is
9779 taken care of by hand along the eh_return path. */
9780 gcc_checking_assert (src == stack_pointer_rtx
9781 || offset == const0_rtx);
9784 m->fs.sp_offset = ooffset - INTVAL (offset);
9785 m->fs.sp_valid = valid;
9789 /* Find an available register to be used as the dynamic realign argument
9790 pointer register. Such a register will be written in the prologue and
9791 used at the beginning of the body, so it must not be
9792 1. a parameter-passing register.
9793 2. the GOT pointer.
9794 We reuse the static-chain register if it is available. Otherwise, we
9795 use DI for i386 and R13 for x86-64. We chose R13 since it has a
9796 longer lifetime.
9798 Return: the regno of the chosen register. */
9801 find_drap_reg (void)
9803 tree decl = cfun->decl;
9807 /* Use R13 for a nested function or a function that needs a static chain.
9808 Since a function with a tail call may use any caller-saved
9809 registers in its epilogue, DRAP must not use a caller-saved
9810 register in such a case. */
9811 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9818 /* Use DI for a nested function or a function that needs a static chain.
9819 Since a function with a tail call may use any caller-saved
9820 registers in its epilogue, DRAP must not use a caller-saved
9821 register in such a case. */
9822 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9825 /* Reuse the static chain register if it isn't used for parameter
9826 passing. */
9827 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2)
9829 unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (decl));
9830 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) == 0)
9837 /* Return minimum incoming stack alignment. */
9840 ix86_minimum_incoming_stack_boundary (bool sibcall)
9842 unsigned int incoming_stack_boundary;
9844 /* Prefer the one specified at command line. */
9845 if (ix86_user_incoming_stack_boundary)
9846 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
9847 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
9848 if -mstackrealign is used, this isn't a sibcall check, and the
9849 estimated stack alignment is 128 bits. */
9852 && ix86_force_align_arg_pointer
9853 && crtl->stack_alignment_estimated == 128)
9854 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9856 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
9858 /* Incoming stack alignment can be changed on individual functions
9859 via force_align_arg_pointer attribute. We use the smallest
9860 incoming stack boundary. */
9861 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
9862 && lookup_attribute (ix86_force_align_arg_pointer_string,
9863 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
9864 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9866 /* The incoming stack frame has to be aligned at least at
9867 parm_stack_boundary. */
9868 if (incoming_stack_boundary < crtl->parm_stack_boundary)
9869 incoming_stack_boundary = crtl->parm_stack_boundary;
9871 /* The stack at the entry of main is aligned by the runtime. We use the
9872 smallest incoming stack boundary. */
9873 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
9874 && DECL_NAME (current_function_decl)
9875 && MAIN_NAME_P (DECL_NAME (current_function_decl))
9876 && DECL_FILE_SCOPE_P (current_function_decl))
9877 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
9879 return incoming_stack_boundary;
9882 /* Update incoming stack boundary and estimated stack alignment. */
9885 ix86_update_stack_boundary (void)
9887 ix86_incoming_stack_boundary
9888 = ix86_minimum_incoming_stack_boundary (false);
9890 /* x86_64 varargs need 16-byte stack alignment for the register save
9891 area. */
9894 && crtl->stack_alignment_estimated < 128)
9895 crtl->stack_alignment_estimated = 128;
9898 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
9899 needed or an rtx for DRAP otherwise. */
9902 ix86_get_drap_rtx (void)
9904 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
9905 crtl->need_drap = true;
9907 if (stack_realign_drap)
9909 /* Assign DRAP to vDRAP and return vDRAP. */
9910 unsigned int regno = find_drap_reg ();
9915 arg_ptr = gen_rtx_REG (Pmode, regno);
9916 crtl->drap_reg = arg_ptr;
9919 drap_vreg = copy_to_reg (arg_ptr);
9923 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
9926 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
9927 RTX_FRAME_RELATED_P (insn) = 1;
9935 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
9938 ix86_internal_arg_pointer (void)
9940 return virtual_incoming_args_rtx;
9943 struct scratch_reg {
9948 /* Return a short-lived scratch register for use on function entry.
9949 In 32-bit mode, it is valid only after the registers are saved
9950 in the prologue. This register must be released by means of
9951 release_scratch_register_on_entry once it is dead. */
9954 get_scratch_register_on_entry (struct scratch_reg *sr)
9962 /* We always use R11 in 64-bit mode. */
9967 tree decl = current_function_decl, fntype = TREE_TYPE (decl);
9969 = lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
9970 bool static_chain_p = DECL_STATIC_CHAIN (decl);
9971 int regparm = ix86_function_regparm (fntype, decl);
9973 = crtl->drap_reg ? REGNO (crtl->drap_reg) : INVALID_REGNUM;
9975 /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
9976 for the static chain register. */
9977 if ((regparm < 1 || (fastcall_p && !static_chain_p))
9978 && drap_regno != AX_REG)
9980 else if (regparm < 2 && drap_regno != DX_REG)
9982 /* ecx is the static chain register. */
9983 else if (regparm < 3 && !fastcall_p && !static_chain_p
9984 && drap_regno != CX_REG)
9986 else if (ix86_save_reg (BX_REG, true))
9988 /* esi is the static chain register. */
9989 else if (!(regparm == 3 && static_chain_p)
9990 && ix86_save_reg (SI_REG, true))
9992 else if (ix86_save_reg (DI_REG, true))
9996 regno = (drap_regno == AX_REG ? DX_REG : AX_REG);
10001 sr->reg = gen_rtx_REG (Pmode, regno);
10004 rtx insn = emit_insn (gen_push (sr->reg));
10005 RTX_FRAME_RELATED_P (insn) = 1;
10009 /* Release a scratch register obtained from the preceding function. */
10012 release_scratch_register_on_entry (struct scratch_reg *sr)
10016 rtx x, insn = emit_insn (gen_pop (sr->reg));
10018 /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop. */
10019 RTX_FRAME_RELATED_P (insn) = 1;
10020 x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
10021 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
10022 add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
10026 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
10028 /* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
10031 ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
10033 /* We skip the probe for the first interval + a small dope of 4 words and
10034 probe that many bytes past the specified size to maintain a protection
10035 area at the bottom of the stack. */
10036 const int dope = 4 * UNITS_PER_WORD;
10037 rtx size_rtx = GEN_INT (size), last;
10039 /* See if we have a constant small number of probes to generate. If so,
10040 that's the easy case. The run-time loop is made up of 11 insns in the
10041 generic case while the compile-time loop is made up of 3+2*(n-1) insns
10042 for n intervals. */
10043 if (size <= 5 * PROBE_INTERVAL)
10045 HOST_WIDE_INT i, adjust;
10046 bool first_probe = true;
10048 /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
10049 values of N from 1 until it exceeds SIZE. If only one probe is
10050 needed, this will not generate any code. Then adjust and probe
10051 to PROBE_INTERVAL + SIZE. */
10052 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
10056 adjust = 2 * PROBE_INTERVAL + dope;
10057 first_probe = false;
10060 adjust = PROBE_INTERVAL;
10062 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10063 plus_constant (stack_pointer_rtx, -adjust)));
10064 emit_stack_probe (stack_pointer_rtx);
10068 adjust = size + PROBE_INTERVAL + dope;
10070 adjust = size + PROBE_INTERVAL - i;
10072 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10073 plus_constant (stack_pointer_rtx, -adjust)));
10074 emit_stack_probe (stack_pointer_rtx);
10076 /* Adjust back to account for the additional first interval. */
10077 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10078 plus_constant (stack_pointer_rtx,
10079 PROBE_INTERVAL + dope)));
10082 /* Otherwise, do the same as above, but in a loop. Note that we must be
10083 extra careful with variables wrapping around because we might be at
10084 the very top (or the very bottom) of the address space and we have
10085 to be able to handle this case properly; in particular, we use an
10086 equality test for the loop condition. */
10089 HOST_WIDE_INT rounded_size;
10090 struct scratch_reg sr;
10092 get_scratch_register_on_entry (&sr);
10095 /* Step 1: round SIZE to the previous multiple of the interval. */
10097 rounded_size = size & -PROBE_INTERVAL;
10100 /* Step 2: compute initial and final value of the loop counter. */
10102 /* SP = SP_0 + PROBE_INTERVAL. */
10103 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10104 plus_constant (stack_pointer_rtx,
10105 - (PROBE_INTERVAL + dope))));
10107 /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
10108 emit_move_insn (sr.reg, GEN_INT (-rounded_size));
10109 emit_insn (gen_rtx_SET (VOIDmode, sr.reg,
10110 gen_rtx_PLUS (Pmode, sr.reg,
10111 stack_pointer_rtx)));
10114 /* Step 3: the loop
10116 while (SP != LAST_ADDR)
10118 SP = SP + PROBE_INTERVAL
10122 adjusts SP and probes to PROBE_INTERVAL + N * PROBE_INTERVAL for
10123 values of N from 1 until it is equal to ROUNDED_SIZE. */
10125 emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg, size_rtx));
10128 /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
10129 assert at compile-time that SIZE is equal to ROUNDED_SIZE. */
10131 if (size != rounded_size)
10133 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10134 plus_constant (stack_pointer_rtx,
10135 rounded_size - size)));
10136 emit_stack_probe (stack_pointer_rtx);
10139 /* Adjust back to account for the additional first interval. */
10140 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10141 plus_constant (stack_pointer_rtx,
10142 PROBE_INTERVAL + dope)));
10144 release_scratch_register_on_entry (&sr);
10147 gcc_assert (cfun->machine->fs.cfa_reg != stack_pointer_rtx);
10149 /* Even if the stack pointer isn't the CFA register, we need to correctly
10150 describe the adjustments made to it, in particular differentiate the
10151 frame-related ones from the frame-unrelated ones. */
10154 rtx expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
10155 XVECEXP (expr, 0, 0)
10156 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10157 plus_constant (stack_pointer_rtx, -size));
10158 XVECEXP (expr, 0, 1)
10159 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10160 plus_constant (stack_pointer_rtx,
10161 PROBE_INTERVAL + dope + size));
10162 add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
10163 RTX_FRAME_RELATED_P (last) = 1;
10165 cfun->machine->fs.sp_offset += size;
10168 /* Make sure nothing is scheduled before we are done. */
10169 emit_insn (gen_blockage ());
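/* Illustrative sketch (not part of GCC): the arithmetic driving the probe
   loop above.  For a power-of-two interval, "size & -PROBE_INTERVAL"
   rounds SIZE down to a whole number of intervals; the loop touches one
   word per interval and a final probe covers the remainder.  The
   4096-byte interval below is an assumption (one page).  */
#if 0
#include <stdio.h>

int
main (void)
{
  const long probe_interval = 4096;            /* Assumed page size.  */
  long size = 10000;
  long rounded_size = size & -probe_interval;  /* 8192.  */
  long sp = 0;

  for (long done = 0; done != rounded_size; done += probe_interval)
    printf ("probe at sp%+ld\n", sp -= probe_interval);
  if (size != rounded_size)                    /* Residual 1808 bytes.  */
    printf ("probe at sp%+ld\n", sp -= size - rounded_size);
  return 0;
}
#endif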
10172 /* Adjust the stack pointer up to REG while probing it. */
10175 output_adjust_stack_and_probe (rtx reg)
10177 static int labelno = 0;
10178 char loop_lab[32], end_lab[32];
10181 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
10182 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
10184 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
10186 /* Jump to END_LAB if SP == LAST_ADDR. */
10187 xops[0] = stack_pointer_rtx;
10189 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
10190 fputs ("\tje\t", asm_out_file);
10191 assemble_name_raw (asm_out_file, end_lab);
10192 fputc ('\n', asm_out_file);
10194 /* SP = SP + PROBE_INTERVAL. */
10195 xops[1] = GEN_INT (PROBE_INTERVAL);
10196 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
10199 xops[1] = const0_rtx;
10200 output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);
10202 fprintf (asm_out_file, "\tjmp\t");
10203 assemble_name_raw (asm_out_file, loop_lab);
10204 fputc ('\n', asm_out_file);
10206 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
10211 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
10212 inclusive. These are offsets from the current stack pointer. */
10215 ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
10217 /* See if we have a constant small number of probes to generate. If so,
10218 that's the easy case. The run-time loop is made up of 7 insns in the
10219 generic case while the compile-time loop is made up of n insns for n
10220 intervals. */
10221 if (size <= 7 * PROBE_INTERVAL)
10225 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
10226 it exceeds SIZE. If only one probe is needed, this will not
10227 generate any code. Then probe at FIRST + SIZE. */
10228 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
10229 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + i)));
10231 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + size)));
10234 /* Otherwise, do the same as above, but in a loop. Note that we must be
10235 extra careful with variables wrapping around because we might be at
10236 the very top (or the very bottom) of the address space and we have
10237 to be able to handle this case properly; in particular, we use an
10238 equality test for the loop condition. */
10241 HOST_WIDE_INT rounded_size, last;
10242 struct scratch_reg sr;
10244 get_scratch_register_on_entry (&sr);
10247 /* Step 1: round SIZE to the previous multiple of the interval. */
10249 rounded_size = size & -PROBE_INTERVAL;
10252 /* Step 2: compute initial and final value of the loop counter. */
10254 /* TEST_OFFSET = FIRST. */
10255 emit_move_insn (sr.reg, GEN_INT (-first));
10257 /* LAST_OFFSET = FIRST + ROUNDED_SIZE. */
10258 last = first + rounded_size;
10261 /* Step 3: the loop
10263 while (TEST_ADDR != LAST_ADDR)
10265 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
10269 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
10270 until it is equal to ROUNDED_SIZE. */
10272 emit_insn (ix86_gen_probe_stack_range (sr.reg, sr.reg, GEN_INT (-last)));
10275 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
10276 that SIZE is equal to ROUNDED_SIZE. */
10278 if (size != rounded_size)
10279 emit_stack_probe (plus_constant (gen_rtx_PLUS (Pmode,
10282 rounded_size - size));
10284 release_scratch_register_on_entry (&sr);
10287 /* Make sure nothing is scheduled before we are done. */
10288 emit_insn (gen_blockage ());
10291 /* Probe a range of stack addresses from REG to END, inclusive. These are
10292 offsets from the current stack pointer. */
10295 output_probe_stack_range (rtx reg, rtx end)
10297 static int labelno = 0;
10298 char loop_lab[32], end_lab[32];
10301 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
10302 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
10304 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
10306 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
10307 xops[0] = reg;
10308 xops[1] = end;
10309 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
10310 fputs ("\tje\t", asm_out_file);
10311 assemble_name_raw (asm_out_file, end_lab);
10312 fputc ('\n', asm_out_file);
10314 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
10315 xops[1] = GEN_INT (PROBE_INTERVAL);
10316 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
10318 /* Probe at TEST_ADDR. */
10319 xops[0] = stack_pointer_rtx;
10320 xops[1] = reg;
10321 xops[2] = const0_rtx;
10322 output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops);
10324 fprintf (asm_out_file, "\tjmp\t");
10325 assemble_name_raw (asm_out_file, loop_lab);
10326 fputc ('\n', asm_out_file);
10328 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
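/* A minimal sketch of the sequence emitted above, again assuming a
   4096-byte PROBE_INTERVAL and illustrative registers (AT&T syntax).
   Unlike output_adjust_stack_and_probe, %rsp is left untouched here:
   the (negative) test offset lives in REG and the probe goes through
   an indexed address:

	.LPSRL1:
		cmpq	%rdx, %rax	# TEST_ADDR == LAST_ADDR?
		je	.LPSRE1
		subq	$4096, %rax	# TEST_ADDR += PROBE_INTERVAL
		orq	$0, (%rsp,%rax)	# probe at TEST_ADDR
		jmp	.LPSRL1
	.LPSRE1:  */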
10333 /* Finalize the stack_realign_needed flag, which will guide the
10334 prologue/epilogue to be generated in the correct form. */
10336 ix86_finalize_stack_realign_flags (void)
10338 /* Check if stack realignment is really needed after reload, and
10339 store the result in cfun. */
10340 unsigned int incoming_stack_boundary
10341 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
10342 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
10343 unsigned int stack_realign = (incoming_stack_boundary
10344 < (current_function_is_leaf
10345 ? crtl->max_used_stack_slot_alignment
10346 : crtl->stack_alignment_needed));
10348 if (crtl->stack_realign_finalized)
10350 /* After stack_realign_needed is finalized, we can no longer
10351 change it. */
10352 gcc_assert (crtl->stack_realign_needed == stack_realign);
10356 crtl->stack_realign_needed = stack_realign;
10357 crtl->stack_realign_finalized = true;
10361 /* Expand the prologue into a bunch of separate insns. */
10364 ix86_expand_prologue (void)
10366 struct machine_function *m = cfun->machine;
10369 struct ix86_frame frame;
10370 HOST_WIDE_INT allocate;
10371 bool int_registers_saved;
10373 ix86_finalize_stack_realign_flags ();
10375 /* DRAP should not coexist with stack_realign_fp */
10376 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
10378 memset (&m->fs, 0, sizeof (m->fs));
10380 /* Initialize CFA state for before the prologue. */
10381 m->fs.cfa_reg = stack_pointer_rtx;
10382 m->fs.cfa_offset = INCOMING_FRAME_SP_OFFSET;
10384 /* Track SP offset to the CFA. We continue tracking this after we've
10385 swapped the CFA register away from SP. In the case of re-alignment
10386 this is fudged; we're interested in offsets within the local frame. */
10387 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
10388 m->fs.sp_valid = true;
10390 ix86_compute_frame_layout (&frame);
10392 if (!TARGET_64BIT && ix86_function_ms_hook_prologue (current_function_decl))
10394 /* We should have already generated an error for any use of
10395 ms_hook on a nested function. */
10396 gcc_checking_assert (!ix86_static_chain_on_stack);
10398 /* Check if profiling is active and we shall use the
10399 profiling-before-prologue variant. If so, sorry. */
10400 if (crtl->profile && flag_fentry != 0)
10401 sorry ("ms_hook_prologue attribute isn%'t compatible "
10402 "with -mfentry for 32-bit");
10404 /* In ix86_asm_output_function_label we emitted:
10405 8b ff movl.s %edi,%edi
10406 55 push %ebp
10407 8b ec movl.s %esp,%ebp
10409 This matches the hookable function prologue in Win32 API
10410 functions in Microsoft Windows XP Service Pack 2 and newer.
10411 Wine uses this to enable Windows apps to hook the Win32 API
10412 functions provided by Wine.
10414 What that means is that we've already set up the frame pointer. */
10416 if (frame_pointer_needed
10417 && !(crtl->drap_reg && crtl->stack_realign_needed))
10421 /* We've decided to use the frame pointer already set up.
10422 Describe this to the unwinder by pretending that both
10423 push and mov insns happen right here.
10425 Putting the unwind info here at the end of the ms_hook
10426 is done so that we can make absolutely certain we get
10427 the required byte sequence at the start of the function,
10428 rather than relying on an assembler that can produce
10429 the exact encoding required.
10431 However it does mean (in the unpatched case) that we have
10432 a 1 insn window where the asynchronous unwind info is
10433 incorrect. However, if we placed the unwind info at
10434 its correct location we would have incorrect unwind info
10435 in the patched case. This is probably all moot, since
10436 I don't expect Wine to generate dwarf2 unwind info for the
10437 system libraries that use this feature. */
10439 insn = emit_insn (gen_blockage ());
10441 push = gen_push (hard_frame_pointer_rtx);
10442 mov = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
10443 stack_pointer_rtx);
10444 RTX_FRAME_RELATED_P (push) = 1;
10445 RTX_FRAME_RELATED_P (mov) = 1;
10447 RTX_FRAME_RELATED_P (insn) = 1;
10448 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10449 gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, push, mov)));
10451 /* Note that gen_push incremented m->fs.cfa_offset, even
10452 though we didn't emit the push insn here. */
10453 m->fs.cfa_reg = hard_frame_pointer_rtx;
10454 m->fs.fp_offset = m->fs.cfa_offset;
10455 m->fs.fp_valid = true;
10459 /* The frame pointer is not needed so pop %ebp again.
10460 This leaves us with a pristine state. */
10461 emit_insn (gen_pop (hard_frame_pointer_rtx));
10465 /* The first insn of a function that accepts its static chain on the
10466 stack is to push the register that would be filled in by a direct
10467 call. This insn will be skipped by the trampoline. */
10468 else if (ix86_static_chain_on_stack)
10470 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
10471 emit_insn (gen_blockage ());
10473 /* We don't want to interpret this push insn as a register save,
10474 only as a stack adjustment. The real copy of the register as
10475 a save will be done later, if needed. */
10476 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
10477 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
10478 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
10479 RTX_FRAME_RELATED_P (insn) = 1;
10482 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
10483 DRAP is needed and stack realignment is really needed after reload. */
10484 if (stack_realign_drap)
10486 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
10488 /* Only need to push parameter pointer reg if it is caller saved. */
10489 if (!call_used_regs[REGNO (crtl->drap_reg)])
10491 /* Push arg pointer reg */
10492 insn = emit_insn (gen_push (crtl->drap_reg));
10493 RTX_FRAME_RELATED_P (insn) = 1;
10496 /* Grab the argument pointer. */
10497 t = plus_constant (stack_pointer_rtx, m->fs.sp_offset);
10498 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
10499 RTX_FRAME_RELATED_P (insn) = 1;
10500 m->fs.cfa_reg = crtl->drap_reg;
10501 m->fs.cfa_offset = 0;
10503 /* Align the stack. */
10504 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
10506 GEN_INT (-align_bytes)));
10507 RTX_FRAME_RELATED_P (insn) = 1;
10509 /* Replicate the return address on the stack so that the return
10510 address can be reached via the (argp - 1) slot. This is needed
10511 to implement macro RETURN_ADDR_RTX and intrinsic function
10512 expand_builtin_return_addr etc. */
10513 t = plus_constant (crtl->drap_reg, -UNITS_PER_WORD);
10514 t = gen_frame_mem (Pmode, t);
10515 insn = emit_insn (gen_push (t));
10516 RTX_FRAME_RELATED_P (insn) = 1;
10518 /* For the purposes of frame and register save area addressing,
10519 we've started over with a new frame. */
10520 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
10521 m->fs.realigned = true;
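/* As a sketch, the DRAP setup above typically boils down to something
   like this for a 32-bit function realigned to 16 bytes, assuming the
   call-used %ecx was picked as the DRAP register (register choice and
   alignment are illustrative):

	leal	4(%esp), %ecx		# grab the incoming argument pointer
	andl	$-16, %esp		# align the stack
	pushl	-4(%ecx)		# replicate the return address  */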
10524 if (frame_pointer_needed && !m->fs.fp_valid)
10526 /* Note: AT&T enter does NOT have reversed args. Enter is probably
10527 slower on all targets. Also sdb doesn't like it. */
10528 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
10529 RTX_FRAME_RELATED_P (insn) = 1;
10531 if (m->fs.sp_offset == frame.hard_frame_pointer_offset)
10533 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10534 RTX_FRAME_RELATED_P (insn) = 1;
10536 if (m->fs.cfa_reg == stack_pointer_rtx)
10537 m->fs.cfa_reg = hard_frame_pointer_rtx;
10538 m->fs.fp_offset = m->fs.sp_offset;
10539 m->fs.fp_valid = true;
10543 int_registers_saved = (frame.nregs == 0);
10545 if (!int_registers_saved)
10547 /* If saving registers via PUSH, do so now. */
10548 if (!frame.save_regs_using_mov)
10550 ix86_emit_save_regs ();
10551 int_registers_saved = true;
10552 gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
10555 /* When using the red zone we may start register saving before allocating
10556 the stack frame, saving one cycle of the prologue. However, avoid
10557 doing this if we have to probe the stack; at least on x86_64 the
10558 stack probe can turn into a call that clobbers a red zone location. */
10559 else if (ix86_using_red_zone ()
10560 && (! TARGET_STACK_PROBE
10561 || frame.stack_pointer_offset < CHECK_STACK_LIMIT))
10563 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
10564 int_registers_saved = true;
10568 if (stack_realign_fp)
10570 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
10571 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
10573 /* The computation of the size of the re-aligned stack frame means
10574 that we must allocate the size of the register save area before
10575 performing the actual alignment. Otherwise we cannot guarantee
10576 that there's enough storage above the realignment point. */
10577 if (m->fs.sp_offset != frame.sse_reg_save_offset)
10578 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10579 GEN_INT (m->fs.sp_offset
10580 - frame.sse_reg_save_offset),
10583 /* Align the stack. */
10584 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
10586 GEN_INT (-align_bytes)));
10588 /* For the purposes of register save area addressing, the stack
10589 pointer is no longer valid. As for the value of sp_offset,
10590 see ix86_compute_frame_layout, which we need to match in order
10591 to pass verification of stack_pointer_offset at the end. */
10592 m->fs.sp_offset = (m->fs.sp_offset + align_bytes) & -align_bytes;
10593 m->fs.sp_valid = false;
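/* A worked example of the rounding above with align_bytes == 16:
   (44 + 16) & -16 == 48 and (48 + 16) & -16 == 64, i.e. sp_offset is
   bumped to the next multiple of align_bytes, conservatively assuming
   the AND moved the stack pointer by a full alignment unit. */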
10596 allocate = frame.stack_pointer_offset - m->fs.sp_offset;
10598 if (flag_stack_usage)
10600 /* We start counting from ARG_POINTER. */
10601 HOST_WIDE_INT stack_size = frame.stack_pointer_offset;
10603 /* If it was realigned, take into account the fake frame. */
10604 if (stack_realign_drap)
10606 if (ix86_static_chain_on_stack)
10607 stack_size += UNITS_PER_WORD;
10609 if (!call_used_regs[REGNO (crtl->drap_reg)])
10610 stack_size += UNITS_PER_WORD;
10612 /* This over-estimates by 1 minimal-stack-alignment-unit but
10613 mitigates that by counting in the new return address slot. */
10614 current_function_dynamic_stack_size
10615 += crtl->stack_alignment_needed / BITS_PER_UNIT;
10618 current_function_static_stack_size = stack_size;
10621 /* The stack has already been decremented by the instruction calling us
10622 so probe if the size is non-negative to preserve the protection area. */
10623 if (allocate >= 0 && flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
10625 /* We expect the registers to be saved when probes are used. */
10626 gcc_assert (int_registers_saved);
10628 if (STACK_CHECK_MOVING_SP)
10630 ix86_adjust_stack_and_probe (allocate);
10635 HOST_WIDE_INT size = allocate;
10637 if (TARGET_64BIT && size >= (HOST_WIDE_INT) 0x80000000)
10638 size = 0x80000000 - STACK_CHECK_PROTECT - 1;
10640 if (TARGET_STACK_PROBE)
10641 ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
10643 ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
10649 else if (!ix86_target_stack_probe ()
10650 || frame.stack_pointer_offset < CHECK_STACK_LIMIT)
10652 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10653 GEN_INT (-allocate), -1,
10654 m->fs.cfa_reg == stack_pointer_rtx);
10658 rtx eax = gen_rtx_REG (Pmode, AX_REG);
10660 rtx (*adjust_stack_insn)(rtx, rtx, rtx);
10662 bool eax_live = false;
10663 bool r10_live = false;
10666 r10_live = (DECL_STATIC_CHAIN (current_function_decl) != 0);
10667 if (!TARGET_64BIT_MS_ABI)
10668 eax_live = ix86_eax_live_at_start_p ();
10672 emit_insn (gen_push (eax));
10673 allocate -= UNITS_PER_WORD;
10677 r10 = gen_rtx_REG (Pmode, R10_REG);
10678 emit_insn (gen_push (r10));
10679 allocate -= UNITS_PER_WORD;
10682 emit_move_insn (eax, GEN_INT (allocate));
10683 emit_insn (ix86_gen_allocate_stack_worker (eax, eax));
10685 /* Use the fact that AX still contains ALLOCATE. */
10686 adjust_stack_insn = (TARGET_64BIT
10687 ? gen_pro_epilogue_adjust_stack_di_sub
10688 : gen_pro_epilogue_adjust_stack_si_sub);
10690 insn = emit_insn (adjust_stack_insn (stack_pointer_rtx,
10691 stack_pointer_rtx, eax));
10693 /* Note that SEH directives need to continue tracking the stack
10694 pointer even after the frame pointer has been set up. */
10695 if (m->fs.cfa_reg == stack_pointer_rtx || TARGET_SEH)
10697 if (m->fs.cfa_reg == stack_pointer_rtx)
10698 m->fs.cfa_offset += allocate;
10700 RTX_FRAME_RELATED_P (insn) = 1;
10701 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10702 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10703 plus_constant (stack_pointer_rtx,
10706 m->fs.sp_offset += allocate;
10708 if (r10_live && eax_live)
10710 t = choose_baseaddr (m->fs.sp_offset - allocate);
10711 emit_move_insn (r10, gen_frame_mem (Pmode, t));
10712 t = choose_baseaddr (m->fs.sp_offset - allocate - UNITS_PER_WORD);
10713 emit_move_insn (eax, gen_frame_mem (Pmode, t));
10715 else if (eax_live || r10_live)
10717 t = choose_baseaddr (m->fs.sp_offset - allocate);
10718 emit_move_insn ((eax_live ? eax : r10), gen_frame_mem (Pmode, t));
10721 gcc_assert (m->fs.sp_offset == frame.stack_pointer_offset);
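/* On Windows targets the allocate_stack_worker pattern above typically
   expands to a probing helper call; roughly, for x86_64, assuming the
   mingw-w64 style ___chkstk_ms helper (which probes each page but does
   not itself adjust %rsp):

	movq	$ALLOCATE, %rax
	call	___chkstk_ms
	subq	%rax, %rsp	  */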
10723 /* If we haven't already set up the frame pointer, do so now. */
10724 if (frame_pointer_needed && !m->fs.fp_valid)
10726 insn = ix86_gen_add3 (hard_frame_pointer_rtx, stack_pointer_rtx,
10727 GEN_INT (frame.stack_pointer_offset
10728 - frame.hard_frame_pointer_offset));
10729 insn = emit_insn (insn);
10730 RTX_FRAME_RELATED_P (insn) = 1;
10731 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
10733 if (m->fs.cfa_reg == stack_pointer_rtx)
10734 m->fs.cfa_reg = hard_frame_pointer_rtx;
10735 m->fs.fp_offset = frame.hard_frame_pointer_offset;
10736 m->fs.fp_valid = true;
10739 if (!int_registers_saved)
10740 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
10741 if (frame.nsseregs)
10742 ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
10744 pic_reg_used = false;
10745 if (pic_offset_table_rtx
10746 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
10749 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
10751 if (alt_pic_reg_used != INVALID_REGNUM)
10752 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
10754 pic_reg_used = true;
10761 if (ix86_cmodel == CM_LARGE_PIC)
10763 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
10764 rtx label = gen_label_rtx ();
10765 emit_label (label);
10766 LABEL_PRESERVE_P (label) = 1;
10767 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
10768 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
10769 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
10770 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
10771 pic_offset_table_rtx, tmp_reg));
10774 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
10777 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
10780 /* In the pic_reg_used case, make sure that the got load isn't deleted
10781 when mcount needs it. Blockage to avoid call movement across mcount
10782 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
10783 note. */
10784 if (crtl->profile && !flag_fentry && pic_reg_used)
10785 emit_insn (gen_prologue_use (pic_offset_table_rtx));
10787 if (crtl->drap_reg && !crtl->stack_realign_needed)
10789 /* vDRAP is set up, but after reload it turns out stack realignment
10790 isn't necessary; here we emit prologue code to set up DRAP
10791 without the stack realignment adjustment. */
10792 t = choose_baseaddr (0);
10793 emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
10796 /* Prevent instructions from being scheduled into register save push
10797 sequence when access to the redzone area is done through frame pointer.
10798 The offset between the frame pointer and the stack pointer is calculated
10799 relative to the value of the stack pointer at the end of the function
10800 prologue, and moving instructions that access redzone area via frame
10801 pointer inside push sequence violates this assumption. */
10802 if (frame_pointer_needed && frame.red_zone_size)
10803 emit_insn (gen_memory_blockage ());
10805 /* Emit cld instruction if stringops are used in the function. */
10806 if (TARGET_CLD && ix86_current_function_needs_cld)
10807 emit_insn (gen_cld ());
10809 /* SEH requires that the prologue end within 256 bytes of the start of
10810 the function. Prevent instruction schedules that would extend that. */
10812 emit_insn (gen_blockage ());
10815 /* Emit code to restore REG using a POP insn. */
10818 ix86_emit_restore_reg_using_pop (rtx reg)
10820 struct machine_function *m = cfun->machine;
10821 rtx insn = emit_insn (gen_pop (reg));
10823 ix86_add_cfa_restore_note (insn, reg, m->fs.sp_offset);
10824 m->fs.sp_offset -= UNITS_PER_WORD;
10826 if (m->fs.cfa_reg == crtl->drap_reg
10827 && REGNO (reg) == REGNO (crtl->drap_reg))
10829 /* Previously we'd represented the CFA as an expression
10830 like *(%ebp - 8). We've just popped that value from
10831 the stack, which means we need to reset the CFA to
10832 the drap register. This will remain until we restore
10833 the stack pointer. */
10834 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
10835 RTX_FRAME_RELATED_P (insn) = 1;
10837 /* This means that the DRAP register is valid for addressing too. */
10838 m->fs.drap_valid = true;
10842 if (m->fs.cfa_reg == stack_pointer_rtx)
10844 rtx x = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
10845 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
10846 add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
10847 RTX_FRAME_RELATED_P (insn) = 1;
10849 m->fs.cfa_offset -= UNITS_PER_WORD;
10852 /* When the frame pointer is the CFA, and we pop it, we are
10853 swapping back to the stack pointer as the CFA. This happens
10854 for stack frames that don't allocate other data, so we assume
10855 the stack pointer is now pointing at the return address, i.e.
10856 the function entry state, which makes the offset be 1 word. */
10857 if (reg == hard_frame_pointer_rtx)
10859 m->fs.fp_valid = false;
10860 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
10862 m->fs.cfa_reg = stack_pointer_rtx;
10863 m->fs.cfa_offset -= UNITS_PER_WORD;
10865 add_reg_note (insn, REG_CFA_DEF_CFA,
10866 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10867 GEN_INT (m->fs.cfa_offset)));
10868 RTX_FRAME_RELATED_P (insn) = 1;
10873 /* Emit code to restore saved registers using POP insns. */
10876 ix86_emit_restore_regs_using_pop (void)
10878 unsigned int regno;
10880 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10881 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
10882 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno));
10885 /* Emit code and notes for the LEAVE instruction. */
10888 ix86_emit_leave (void)
10890 struct machine_function *m = cfun->machine;
10891 rtx insn = emit_insn (ix86_gen_leave ());
10893 ix86_add_queued_cfa_restore_notes (insn);
10895 gcc_assert (m->fs.fp_valid);
10896 m->fs.sp_valid = true;
10897 m->fs.sp_offset = m->fs.fp_offset - UNITS_PER_WORD;
10898 m->fs.fp_valid = false;
10900 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
10902 m->fs.cfa_reg = stack_pointer_rtx;
10903 m->fs.cfa_offset = m->fs.sp_offset;
10905 add_reg_note (insn, REG_CFA_DEF_CFA,
10906 plus_constant (stack_pointer_rtx, m->fs.sp_offset));
10907 RTX_FRAME_RELATED_P (insn) = 1;
10908 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
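/* The bookkeeping above mirrors the hardware semantics of leave, which
   behaves like

	movq	%rbp, %rsp	# SP becomes valid at the FP's location
	popq	%rbp		# and ends up one word closer to the CFA

   hence sp_offset = fp_offset - UNITS_PER_WORD once the pop retires. */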
10913 /* Emit code to restore saved registers using MOV insns.
10914 First register is restored from CFA - CFA_OFFSET. */
10916 ix86_emit_restore_regs_using_mov (HOST_WIDE_INT cfa_offset,
10917 int maybe_eh_return)
10919 struct machine_function *m = cfun->machine;
10920 unsigned int regno;
10922 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10923 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
10925 rtx reg = gen_rtx_REG (Pmode, regno);
10928 mem = choose_baseaddr (cfa_offset);
10929 mem = gen_frame_mem (Pmode, mem);
10930 insn = emit_move_insn (reg, mem);
10932 if (m->fs.cfa_reg == crtl->drap_reg && regno == REGNO (crtl->drap_reg))
10934 /* Previously we'd represented the CFA as an expression
10935 like *(%ebp - 8). We've just reloaded that value from
10936 the stack, which means we need to reset the CFA to
10937 the drap register. This will remain until we restore
10938 the stack pointer. */
10939 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
10940 RTX_FRAME_RELATED_P (insn) = 1;
10942 /* This means that the DRAP register is valid for addressing. */
10943 m->fs.drap_valid = true;
10946 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10948 cfa_offset -= UNITS_PER_WORD;
10952 /* Emit code to restore saved registers using MOV insns.
10953 First register is restored from CFA - CFA_OFFSET. */
10955 ix86_emit_restore_sse_regs_using_mov (HOST_WIDE_INT cfa_offset,
10956 int maybe_eh_return)
10958 unsigned int regno;
10960 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10961 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
10963 rtx reg = gen_rtx_REG (V4SFmode, regno);
10966 mem = choose_baseaddr (cfa_offset);
10967 mem = gen_rtx_MEM (V4SFmode, mem);
10968 set_mem_align (mem, 128);
10969 emit_move_insn (reg, mem);
10971 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10977 /* Restore function stack, frame, and registers. */
10980 ix86_expand_epilogue (int style)
10982 struct machine_function *m = cfun->machine;
10983 struct machine_frame_state frame_state_save = m->fs;
10984 struct ix86_frame frame;
10985 bool restore_regs_via_mov;
10988 ix86_finalize_stack_realign_flags ();
10989 ix86_compute_frame_layout (&frame);
10991 m->fs.sp_valid = (!frame_pointer_needed
10992 || (current_function_sp_is_unchanging
10993 && !stack_realign_fp));
10994 gcc_assert (!m->fs.sp_valid
10995 || m->fs.sp_offset == frame.stack_pointer_offset);
10997 /* The FP must be valid if the frame pointer is present. */
10998 gcc_assert (frame_pointer_needed == m->fs.fp_valid);
10999 gcc_assert (!m->fs.fp_valid
11000 || m->fs.fp_offset == frame.hard_frame_pointer_offset);
11002 /* We must have *some* valid pointer to the stack frame. */
11003 gcc_assert (m->fs.sp_valid || m->fs.fp_valid);
11005 /* The DRAP is never valid at this point. */
11006 gcc_assert (!m->fs.drap_valid);
11008 /* See the comment about red zone and frame
11009 pointer usage in ix86_expand_prologue. */
11010 if (frame_pointer_needed && frame.red_zone_size)
11011 emit_insn (gen_memory_blockage ());
11013 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
11014 gcc_assert (!using_drap || m->fs.cfa_reg == crtl->drap_reg);
11016 /* Determine the CFA offset of the end of the red-zone. */
11017 m->fs.red_zone_offset = 0;
11018 if (ix86_using_red_zone () && crtl->args.pops_args < 65536)
11020 /* The red-zone begins below the return address. */
11021 m->fs.red_zone_offset = RED_ZONE_SIZE + UNITS_PER_WORD;
11023 /* When the register save area is in the aligned portion of
11024 the stack, determine the maximum runtime displacement that
11025 matches up with the aligned frame. */
11026 if (stack_realign_drap)
11027 m->fs.red_zone_offset -= (crtl->stack_alignment_needed / BITS_PER_UNIT
11031 /* Special care must be taken for the normal return case of a function
11032 using eh_return: the eax and edx registers are marked as saved, but
11033 not restored along this path. Adjust the save location to match. */
11034 if (crtl->calls_eh_return && style != 2)
11035 frame.reg_save_offset -= 2 * UNITS_PER_WORD;
11037 /* EH_RETURN requires the use of moves to function properly. */
11038 if (crtl->calls_eh_return)
11039 restore_regs_via_mov = true;
11040 /* SEH requires the use of pops to identify the epilogue. */
11041 else if (TARGET_SEH)
11042 restore_regs_via_mov = false;
11043 /* If we're only restoring one register and sp is not valid, then
11044 use a move instruction to restore the register, since it's
11045 less work than reloading sp and popping the register. */
11046 else if (!m->fs.sp_valid && frame.nregs <= 1)
11047 restore_regs_via_mov = true;
11048 else if (TARGET_EPILOGUE_USING_MOVE
11049 && cfun->machine->use_fast_prologue_epilogue
11050 && (frame.nregs > 1
11051 || m->fs.sp_offset != frame.reg_save_offset))
11052 restore_regs_via_mov = true;
11053 else if (frame_pointer_needed
11055 && m->fs.sp_offset != frame.reg_save_offset)
11056 restore_regs_via_mov = true;
11057 else if (frame_pointer_needed
11058 && TARGET_USE_LEAVE
11059 && cfun->machine->use_fast_prologue_epilogue
11060 && frame.nregs == 1)
11061 restore_regs_via_mov = true;
11063 restore_regs_via_mov = false;
11065 if (restore_regs_via_mov || frame.nsseregs)
11067 /* Ensure that the entire register save area is addressable via
11068 the stack pointer, if we will restore via sp. */
11070 && m->fs.sp_offset > 0x7fffffff
11071 && !(m->fs.fp_valid || m->fs.drap_valid)
11072 && (frame.nsseregs + frame.nregs) != 0)
11074 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11075 GEN_INT (m->fs.sp_offset
11076 - frame.sse_reg_save_offset),
11078 m->fs.cfa_reg == stack_pointer_rtx);
11082 /* If there are any SSE registers to restore, then we have to do it
11083 via moves, since there's obviously no pop for SSE regs. */
11084 if (frame.nsseregs)
11085 ix86_emit_restore_sse_regs_using_mov (frame.sse_reg_save_offset,
11088 if (restore_regs_via_mov)
11093 ix86_emit_restore_regs_using_mov (frame.reg_save_offset, style == 2);
11095 /* eh_return epilogues need %ecx added to the stack pointer. */
11098 rtx insn, sa = EH_RETURN_STACKADJ_RTX;
11100 /* Stack align doesn't work with eh_return. */
11101 gcc_assert (!stack_realign_drap);
11102 /* Neither do regparm nested functions. */
11103 gcc_assert (!ix86_static_chain_on_stack);
11105 if (frame_pointer_needed)
11107 t = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
11108 t = plus_constant (t, m->fs.fp_offset - UNITS_PER_WORD);
11109 emit_insn (gen_rtx_SET (VOIDmode, sa, t));
11111 t = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
11112 insn = emit_move_insn (hard_frame_pointer_rtx, t);
11114 /* Note that we use SA as a temporary CFA, as the return
11115 address is at the proper place relative to it. We
11116 pretend this happens at the FP restore insn because
11117 prior to this insn the FP would be stored at the wrong
11118 offset relative to SA, and after this insn we have no
11119 other reasonable register to use for the CFA. We don't
11120 bother resetting the CFA to the SP for the duration of
11121 the return insn. */
11122 add_reg_note (insn, REG_CFA_DEF_CFA,
11123 plus_constant (sa, UNITS_PER_WORD));
11124 ix86_add_queued_cfa_restore_notes (insn);
11125 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
11126 RTX_FRAME_RELATED_P (insn) = 1;
11128 m->fs.cfa_reg = sa;
11129 m->fs.cfa_offset = UNITS_PER_WORD;
11130 m->fs.fp_valid = false;
11132 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
11133 const0_rtx, style, false);
11137 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
11138 t = plus_constant (t, m->fs.sp_offset - UNITS_PER_WORD);
11139 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, t));
11140 ix86_add_queued_cfa_restore_notes (insn);
11142 gcc_assert (m->fs.cfa_reg == stack_pointer_rtx);
11143 if (m->fs.cfa_offset != UNITS_PER_WORD)
11145 m->fs.cfa_offset = UNITS_PER_WORD;
11146 add_reg_note (insn, REG_CFA_DEF_CFA,
11147 plus_constant (stack_pointer_rtx,
11149 RTX_FRAME_RELATED_P (insn) = 1;
11152 m->fs.sp_offset = UNITS_PER_WORD;
11153 m->fs.sp_valid = true;
11158 /* SEH requires that the function end with (1) a stack adjustment
11159 if necessary, (2) a sequence of pops, and (3) a return or
11160 jump instruction. Prevent insns from the function body from
11161 being scheduled into this sequence. */
11164 /* Prevent a catch region from being adjacent to the standard
11165 epilogue sequence. Unfortunately, neither crtl->uses_eh_lsda nor
11166 several other flags that would be interesting to test are
11167 set up yet. */
11168 if (flag_non_call_exceptions)
11169 emit_insn (gen_nops (const1_rtx));
11171 emit_insn (gen_blockage ());
11174 /* First step is to deallocate the stack frame so that we can
11175 pop the registers. */
11176 if (!m->fs.sp_valid)
11178 pro_epilogue_adjust_stack (stack_pointer_rtx, hard_frame_pointer_rtx,
11179 GEN_INT (m->fs.fp_offset
11180 - frame.reg_save_offset),
11183 else if (m->fs.sp_offset != frame.reg_save_offset)
11185 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11186 GEN_INT (m->fs.sp_offset
11187 - frame.reg_save_offset),
11189 m->fs.cfa_reg == stack_pointer_rtx);
11192 ix86_emit_restore_regs_using_pop ();
11195 /* If we used a frame pointer and haven't already got rid of it,
11196 then pop it. */
11197 if (m->fs.fp_valid)
11199 /* If the stack pointer is valid and pointing at the frame
11200 pointer store address, then we only need a pop. */
11201 if (m->fs.sp_valid && m->fs.sp_offset == frame.hfp_save_offset)
11202 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
11203 /* Leave results in shorter dependency chains on CPUs that are
11204 able to grok it fast. */
11205 else if (TARGET_USE_LEAVE
11206 || optimize_function_for_size_p (cfun)
11207 || !cfun->machine->use_fast_prologue_epilogue)
11208 ix86_emit_leave ();
11211 pro_epilogue_adjust_stack (stack_pointer_rtx,
11212 hard_frame_pointer_rtx,
11213 const0_rtx, style, !using_drap);
11214 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
11220 int param_ptr_offset = UNITS_PER_WORD;
11223 gcc_assert (stack_realign_drap);
11225 if (ix86_static_chain_on_stack)
11226 param_ptr_offset += UNITS_PER_WORD;
11227 if (!call_used_regs[REGNO (crtl->drap_reg)])
11228 param_ptr_offset += UNITS_PER_WORD;
11230 insn = emit_insn (gen_rtx_SET
11231 (VOIDmode, stack_pointer_rtx,
11232 gen_rtx_PLUS (Pmode,
11234 GEN_INT (-param_ptr_offset))));
11235 m->fs.cfa_reg = stack_pointer_rtx;
11236 m->fs.cfa_offset = param_ptr_offset;
11237 m->fs.sp_offset = param_ptr_offset;
11238 m->fs.realigned = false;
11240 add_reg_note (insn, REG_CFA_DEF_CFA,
11241 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11242 GEN_INT (param_ptr_offset)));
11243 RTX_FRAME_RELATED_P (insn) = 1;
11245 if (!call_used_regs[REGNO (crtl->drap_reg)])
11246 ix86_emit_restore_reg_using_pop (crtl->drap_reg);
11249 /* At this point the stack pointer must be valid, and we must have
11250 restored all of the registers. We may not have deallocated the
11251 entire stack frame. We've delayed this until now because it may
11252 be possible to merge the local stack deallocation with the
11253 deallocation forced by ix86_static_chain_on_stack. */
11254 gcc_assert (m->fs.sp_valid);
11255 gcc_assert (!m->fs.fp_valid);
11256 gcc_assert (!m->fs.realigned);
11257 if (m->fs.sp_offset != UNITS_PER_WORD)
11259 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11260 GEN_INT (m->fs.sp_offset - UNITS_PER_WORD),
11264 /* Sibcall epilogues don't want a return instruction. */
11267 m->fs = frame_state_save;
11271 /* Emit vzeroupper if needed. */
11272 if (TARGET_VZEROUPPER
11273 && !TREE_THIS_VOLATILE (cfun->decl)
11274 && !cfun->machine->caller_return_avx256_p)
11275 emit_insn (gen_avx_vzeroupper (GEN_INT (call_no_avx256)));
11277 if (crtl->args.pops_args && crtl->args.size)
11279 rtx popc = GEN_INT (crtl->args.pops_args);
11281 /* i386 can only pop 64K bytes. If asked to pop more, pop return
11282 address, do explicit add, and jump indirectly to the caller. */
11284 if (crtl->args.pops_args >= 65536)
11286 rtx ecx = gen_rtx_REG (SImode, CX_REG);
11289 /* There is no "pascal" calling convention in any 64bit ABI. */
11290 gcc_assert (!TARGET_64BIT);
11292 insn = emit_insn (gen_pop (ecx));
11293 m->fs.cfa_offset -= UNITS_PER_WORD;
11294 m->fs.sp_offset -= UNITS_PER_WORD;
11296 add_reg_note (insn, REG_CFA_ADJUST_CFA,
11297 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
11298 add_reg_note (insn, REG_CFA_REGISTER,
11299 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
11300 RTX_FRAME_RELATED_P (insn) = 1;
11302 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
11304 emit_jump_insn (gen_return_indirect_internal (ecx));
11307 emit_jump_insn (gen_return_pop_internal (popc));
11310 emit_jump_insn (gen_return_internal ());
11312 /* Restore the state back to the state from the prologue,
11313 so that it's correct for the next epilogue. */
11314 m->fs = frame_state_save;
11317 /* Reset from the function's potential modifications. */
11320 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
11321 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
11323 if (pic_offset_table_rtx)
11324 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
11326 /* Mach-O doesn't support labels at the end of objects, so if
11327 it looks like we might want one, insert a NOP. */
11329 rtx insn = get_last_insn ();
11332 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
11333 insn = PREV_INSN (insn);
11337 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
11338 fputs ("\tnop\n", file);
11344 /* Return a scratch register to use in the split stack prologue. The
11345 split stack prologue is used for -fsplit-stack. It is the first
11346 instructions in the function, even before the regular prologue.
11347 The scratch register can be any caller-saved register which is not
11348 used for parameters or for the static chain. */
11350 static unsigned int
11351 split_stack_prologue_scratch_regno (void)
11360 is_fastcall = (lookup_attribute ("fastcall",
11361 TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
11363 regparm = ix86_function_regparm (TREE_TYPE (cfun->decl), cfun->decl);
11367 if (DECL_STATIC_CHAIN (cfun->decl))
11369 sorry ("-fsplit-stack does not support fastcall with "
11370 "nested function");
11371 return INVALID_REGNUM;
11375 else if (regparm < 3)
11377 if (!DECL_STATIC_CHAIN (cfun->decl))
11383 sorry ("-fsplit-stack does not support 2 register "
11384 "parameters for a nested function");
11385 return INVALID_REGNUM;
11392 /* FIXME: We could make this work by pushing a register
11393 around the addition and comparison. */
11394 sorry ("-fsplit-stack does not support 3 register parameters");
11395 return INVALID_REGNUM;
11400 /* A SYMBOL_REF for the function which allocates new stack space for
11401 -fsplit-stack. */
11403 static GTY(()) rtx split_stack_fn;
11405 /* A SYMBOL_REF for the more-stack function to call when using the large
11406 model. */
11408 static GTY(()) rtx split_stack_fn_large;
11410 /* Handle -fsplit-stack. These are the first instructions in the
11411 function, even before the regular prologue. */
11414 ix86_expand_split_stack_prologue (void)
11416 struct ix86_frame frame;
11417 HOST_WIDE_INT allocate;
11418 unsigned HOST_WIDE_INT args_size;
11419 rtx label, limit, current, jump_insn, allocate_rtx, call_insn, call_fusage;
11420 rtx scratch_reg = NULL_RTX;
11421 rtx varargs_label = NULL_RTX;
11424 gcc_assert (flag_split_stack && reload_completed);
11426 ix86_finalize_stack_realign_flags ();
11427 ix86_compute_frame_layout (&frame);
11428 allocate = frame.stack_pointer_offset - INCOMING_FRAME_SP_OFFSET;
11430 /* This is the label we will branch to if we have enough stack
11431 space. We expect the basic block reordering pass to reverse this
11432 branch if optimizing, so that we branch in the unlikely case. */
11433 label = gen_label_rtx ();
11435 /* We need to compare the stack pointer minus the frame size with
11436 the stack boundary in the TCB. The stack boundary always gives
11437 us SPLIT_STACK_AVAILABLE bytes, so if we need less than that we
11438 can compare directly. Otherwise we need to do an addition. */
11440 limit = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
11441 UNSPEC_STACK_CHECK);
11442 limit = gen_rtx_CONST (Pmode, limit);
11443 limit = gen_rtx_MEM (Pmode, limit);
11444 if (allocate < SPLIT_STACK_AVAILABLE)
11445 current = stack_pointer_rtx;
11448 unsigned int scratch_regno;
11451 /* We need a scratch register to hold the stack pointer minus
11452 the required frame size. Since this is the very start of the
11453 function, the scratch register can be any caller-saved
11454 register which is not used for parameters. */
11455 offset = GEN_INT (- allocate);
11456 scratch_regno = split_stack_prologue_scratch_regno ();
11457 if (scratch_regno == INVALID_REGNUM)
11459 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
11460 if (!TARGET_64BIT || x86_64_immediate_operand (offset, Pmode))
11462 /* We don't use ix86_gen_add3 in this case because it will
11463 want to split to lea, but when not optimizing the insn
11464 will not be split after this point. */
11465 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11466 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11471 emit_move_insn (scratch_reg, offset);
11472 emit_insn (gen_adddi3 (scratch_reg, scratch_reg,
11473 stack_pointer_rtx));
11475 current = scratch_reg;
11478 ix86_expand_branch (GEU, current, limit, label);
11479 jump_insn = get_last_insn ();
11480 JUMP_LABEL (jump_insn) = label;
11482 /* Mark the jump as very likely to be taken. */
11483 add_reg_note (jump_insn, REG_BR_PROB,
11484 GEN_INT (REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100));
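/* REG_BR_PROB_BASE is 10000, so this records a probability of
   (10000 - 100) / 10000, i.e. a 99% chance of branching past the
   __morestack call. */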
11486 if (split_stack_fn == NULL_RTX)
11487 split_stack_fn = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11488 fn = split_stack_fn;
11490 /* Get more stack space. We pass in the desired stack space and the
11491 size of the arguments to copy to the new stack. In 32-bit mode
11492 we push the parameters; __morestack will return on a new stack
11493 anyhow. In 64-bit mode we pass the parameters in r10 and
11494 r11. */
11495 allocate_rtx = GEN_INT (allocate);
11496 args_size = crtl->args.size >= 0 ? crtl->args.size : 0;
11497 call_fusage = NULL_RTX;
11502 reg10 = gen_rtx_REG (Pmode, R10_REG);
11503 reg11 = gen_rtx_REG (Pmode, R11_REG);
11505 /* If this function uses a static chain, it will be in %r10.
11506 Preserve it across the call to __morestack. */
11507 if (DECL_STATIC_CHAIN (cfun->decl))
11511 rax = gen_rtx_REG (Pmode, AX_REG);
11512 emit_move_insn (rax, reg10);
11513 use_reg (&call_fusage, rax);
11516 if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
11518 HOST_WIDE_INT argval;
11520 /* When using the large model we need to load the address
11521 into a register, and we've run out of registers. So we
11522 switch to a different calling convention, and we call a
11523 different function: __morestack_large_model. We pass the
11524 argument size in the upper 32 bits of r10 and pass the
11525 frame size in the lower 32 bits. */
11526 gcc_assert ((allocate & (HOST_WIDE_INT) 0xffffffff) == allocate);
11527 gcc_assert ((args_size & 0xffffffff) == args_size);
11529 if (split_stack_fn_large == NULL_RTX)
11530 split_stack_fn_large =
11531 gen_rtx_SYMBOL_REF (Pmode, "__morestack_large_model");
11533 if (ix86_cmodel == CM_LARGE_PIC)
11537 label = gen_label_rtx ();
11538 emit_label (label);
11539 LABEL_PRESERVE_P (label) = 1;
11540 emit_insn (gen_set_rip_rex64 (reg10, label));
11541 emit_insn (gen_set_got_offset_rex64 (reg11, label));
11542 emit_insn (gen_adddi3 (reg10, reg10, reg11));
11543 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, split_stack_fn_large),
11545 x = gen_rtx_CONST (Pmode, x);
11546 emit_move_insn (reg11, x);
11547 x = gen_rtx_PLUS (Pmode, reg10, reg11);
11548 x = gen_const_mem (Pmode, x);
11549 emit_move_insn (reg11, x);
11552 emit_move_insn (reg11, split_stack_fn_large);
11556 argval = ((args_size << 16) << 16) + allocate;
11557 emit_move_insn (reg10, GEN_INT (argval));
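/* E.g. an args_size of 0x18 and an allocation of 0x1000 pack into
   0x0000001800001000. The shift is written as two 16-bit shifts,
   presumably to keep it well defined even if HOST_WIDE_INT were only
   32 bits wide. */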
11561 emit_move_insn (reg10, allocate_rtx);
11562 emit_move_insn (reg11, GEN_INT (args_size));
11563 use_reg (&call_fusage, reg11);
11566 use_reg (&call_fusage, reg10);
11570 emit_insn (gen_push (GEN_INT (args_size)));
11571 emit_insn (gen_push (allocate_rtx));
11573 call_insn = ix86_expand_call (NULL_RTX, gen_rtx_MEM (QImode, fn),
11574 GEN_INT (UNITS_PER_WORD), constm1_rtx,
11576 add_function_usage_to (call_insn, call_fusage);
11578 /* In order to make call/return prediction work right, we now need
11579 to execute a return instruction. See
11580 libgcc/config/i386/morestack.S for the details on how this works.
11582 For flow purposes gcc must not see this as a return
11583 instruction--we need control flow to continue at the subsequent
11584 label. Therefore, we use an unspec. */
11585 gcc_assert (crtl->args.pops_args < 65536);
11586 emit_insn (gen_split_stack_return (GEN_INT (crtl->args.pops_args)));
11588 /* If we are in 64-bit mode and this function uses a static chain,
11589 we saved %r10 in %rax before calling __morestack. */
11590 if (TARGET_64BIT && DECL_STATIC_CHAIN (cfun->decl))
11591 emit_move_insn (gen_rtx_REG (Pmode, R10_REG),
11592 gen_rtx_REG (Pmode, AX_REG));
11594 /* If this function calls va_start, we need to store a pointer to
11595 the arguments on the old stack, because they may not have been
11596 all copied to the new stack. At this point the old stack can be
11597 found at the frame pointer value used by __morestack, because
11598 __morestack has set that up before calling back to us. Here we
11599 store that pointer in a scratch register, and in
11600 ix86_expand_prologue we store the scratch register in a stack
11602 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11604 unsigned int scratch_regno;
11608 scratch_regno = split_stack_prologue_scratch_regno ();
11609 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
11610 frame_reg = gen_rtx_REG (Pmode, BP_REG);
11612 /* 64-bit:
11613 fp -> old fp value
11614 return address within this function
11615 return address of caller of this function
11616 stack arguments
11617 So we add three words to get to the stack arguments.
11619 32-bit:
11620 fp -> old fp value
11621 return address within this function
11622 first argument to __morestack
11623 second argument to __morestack
11624 return address of caller of this function
11625 stack arguments
11626 So we add five words to get to the stack arguments. */
11628 words = TARGET_64BIT ? 3 : 5;
11629 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11630 gen_rtx_PLUS (Pmode, frame_reg,
11631 GEN_INT (words * UNITS_PER_WORD))));
11633 varargs_label = gen_label_rtx ();
11634 emit_jump_insn (gen_jump (varargs_label));
11635 JUMP_LABEL (get_last_insn ()) = varargs_label;
11640 emit_label (label);
11641 LABEL_NUSES (label) = 1;
11643 /* If this function calls va_start, we now have to set the scratch
11644 register for the case where we do not call __morestack. In this
11645 case we need to set it based on the stack pointer. */
11646 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11648 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11649 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11650 GEN_INT (UNITS_PER_WORD))));
11652 emit_label (varargs_label);
11653 LABEL_NUSES (varargs_label) = 1;
11657 /* We may have to tell the dataflow pass that the split stack prologue
11658 is initializing a scratch register. */
11661 ix86_live_on_entry (bitmap regs)
11663 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11665 gcc_assert (flag_split_stack);
11666 bitmap_set_bit (regs, split_stack_prologue_scratch_regno ());
11670 /* Extract the parts of an RTL expression that is a valid memory address
11671 for an instruction. Return 0 if the structure of the address is
11672 grossly off. Return -1 if the address contains ASHIFT, so it is not
11673 strictly valid, but is still used for computing the length of an lea instruction. */
11676 ix86_decompose_address (rtx addr, struct ix86_address *out)
11678 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
11679 rtx base_reg, index_reg;
11680 HOST_WIDE_INT scale = 1;
11681 rtx scale_rtx = NULL_RTX;
11684 enum ix86_address_seg seg = SEG_DEFAULT;
11686 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
11688 else if (GET_CODE (addr) == PLUS)
11690 rtx addends[4], op;
11698 addends[n++] = XEXP (op, 1);
11701 while (GET_CODE (op) == PLUS);
11706 for (i = n; i >= 0; --i)
11709 switch (GET_CODE (op))
11714 index = XEXP (op, 0);
11715 scale_rtx = XEXP (op, 1);
11721 index = XEXP (op, 0);
11722 tmp = XEXP (op, 1);
11723 if (!CONST_INT_P (tmp))
11725 scale = INTVAL (tmp);
11726 if ((unsigned HOST_WIDE_INT) scale > 3)
11728 scale = 1 << scale;
11732 if (XINT (op, 1) == UNSPEC_TP
11733 && TARGET_TLS_DIRECT_SEG_REFS
11734 && seg == SEG_DEFAULT)
11735 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
11764 else if (GET_CODE (addr) == MULT)
11766 index = XEXP (addr, 0); /* index*scale */
11767 scale_rtx = XEXP (addr, 1);
11769 else if (GET_CODE (addr) == ASHIFT)
11771 /* We're called for lea too, which implements ashift on occasion. */
11772 index = XEXP (addr, 0);
11773 tmp = XEXP (addr, 1);
11774 if (!CONST_INT_P (tmp))
11776 scale = INTVAL (tmp);
11777 if ((unsigned HOST_WIDE_INT) scale > 3)
11779 scale = 1 << scale;
11783 disp = addr; /* displacement */
11785 /* Extract the integral value of scale. */
11788 if (!CONST_INT_P (scale_rtx))
11790 scale = INTVAL (scale_rtx);
11793 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
11794 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
11796 /* Avoid useless 0 displacement. */
11797 if (disp == const0_rtx && (base || index))
11800 /* Allow arg pointer and stack pointer as index if there is no scaling. */
11801 if (base_reg && index_reg && scale == 1
11802 && (index_reg == arg_pointer_rtx
11803 || index_reg == frame_pointer_rtx
11804 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
11807 tmp = base, base = index, index = tmp;
11808 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
11811 /* Special case: %ebp cannot be encoded as a base without a displacement.
11812 Similarly %r13. */
11815 && (base_reg == hard_frame_pointer_rtx
11816 || base_reg == frame_pointer_rtx
11817 || base_reg == arg_pointer_rtx
11818 || (REG_P (base_reg)
11819 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
11820 || REGNO (base_reg) == R13_REG))))
11823 /* Special case: on K6, [%esi] makes the instruction vector decoded.
11824 Avoid this by transforming to [%esi+0].
11825 Reload calls address legitimization without cfun defined, so we need
11826 to test cfun for being non-NULL. */
11827 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
11828 && base_reg && !index_reg && !disp
11829 && REG_P (base_reg) && REGNO (base_reg) == SI_REG)
11832 /* Special case: encode reg+reg instead of reg*2. */
11833 if (!base && index && scale == 2)
11834 base = index, base_reg = index_reg, scale = 1;
11836 /* Special case: scaling cannot be encoded without base or displacement. */
11837 if (!base && !disp && index && scale != 1)
11841 out->index = index;
11843 out->scale = scale;
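/* As a worked example of the decomposition, the address
   12(%ebx,%ecx,4), i.e. in RTL

	(plus (reg %ebx)
	      (plus (mult (reg %ecx) (const_int 4))
		    (const_int 12)))

   comes back as base = %ebx, index = %ecx, scale = 4, disp = 12. */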
11849 /* Return cost of the memory address x.
11850 For i386, it is better to use a complex address than let gcc copy
11851 the address into a reg and make a new pseudo. But not if the address
11852 requires two regs - that would mean more pseudos with longer
11853 lifetimes. */
11855 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
11857 struct ix86_address parts;
11859 int ok = ix86_decompose_address (x, &parts);
11863 if (parts.base && GET_CODE (parts.base) == SUBREG)
11864 parts.base = SUBREG_REG (parts.base);
11865 if (parts.index && GET_CODE (parts.index) == SUBREG)
11866 parts.index = SUBREG_REG (parts.index);
11868 /* Attempt to minimize number of registers in the address. */
11870 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
11872 && (!REG_P (parts.index)
11873 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
11877 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
11879 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
11880 && parts.base != parts.index)
11883 /* The AMD K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
11884 since its predecode logic can't detect the length of instructions
11885 and it degenerates to vector decoding. Increase the cost of such
11886 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
11887 to split such addresses or even refuse such addresses at all.
11889 The following addressing modes are affected:
11890 [base+scale*index]
11891 [scale*index+disp]
11892 [base+index]
11894 The first and last cases may be avoidable by explicitly coding the zero in
11895 the memory address, but I don't have an AMD-K6 machine handy to check this
11899 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
11900 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
11901 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
11907 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
11908 this is used to form addresses to local data when -fPIC is in
11909 effect. */
11912 darwin_local_data_pic (rtx disp)
11914 return (GET_CODE (disp) == UNSPEC
11915 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
11918 /* Determine if a given RTX is a valid constant. We already know this
11919 satisfies CONSTANT_P. */
11922 ix86_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
11924 switch (GET_CODE (x))
11929 if (GET_CODE (x) == PLUS)
11931 if (!CONST_INT_P (XEXP (x, 1)))
11936 if (TARGET_MACHO && darwin_local_data_pic (x))
11939 /* Only some unspecs are valid as "constants". */
11940 if (GET_CODE (x) == UNSPEC)
11941 switch (XINT (x, 1))
11944 case UNSPEC_GOTOFF:
11945 case UNSPEC_PLTOFF:
11946 return TARGET_64BIT;
11948 case UNSPEC_NTPOFF:
11949 x = XVECEXP (x, 0, 0);
11950 return (GET_CODE (x) == SYMBOL_REF
11951 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
11952 case UNSPEC_DTPOFF:
11953 x = XVECEXP (x, 0, 0);
11954 return (GET_CODE (x) == SYMBOL_REF
11955 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
11960 /* We must have drilled down to a symbol. */
11961 if (GET_CODE (x) == LABEL_REF)
11963 if (GET_CODE (x) != SYMBOL_REF)
11968 /* TLS symbols are never valid. */
11969 if (SYMBOL_REF_TLS_MODEL (x))
11972 /* DLLIMPORT symbols are never valid. */
11973 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
11974 && SYMBOL_REF_DLLIMPORT_P (x))
11978 /* mdynamic-no-pic */
11979 if (MACHO_DYNAMIC_NO_PIC_P)
11980 return machopic_symbol_defined_p (x);
11985 if (GET_MODE (x) == TImode
11986 && x != CONST0_RTX (TImode)
11992 if (!standard_sse_constant_p (x))
11999 /* Otherwise we handle everything else in the move patterns. */
12003 /* Determine if it's legal to put X into the constant pool. This
12004 is not possible for the address of thread-local symbols, which
12005 is checked above. */
12008 ix86_cannot_force_const_mem (enum machine_mode mode, rtx x)
12010 /* We can always put integral constants and vectors in memory. */
12011 switch (GET_CODE (x))
12021 return !ix86_legitimate_constant_p (mode, x);
12025 /* Nonzero if the constant value X is a legitimate general operand
12026 when generating PIC code. It is given that flag_pic is on and
12027 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
12030 legitimate_pic_operand_p (rtx x)
12034 switch (GET_CODE (x))
12037 inner = XEXP (x, 0);
12038 if (GET_CODE (inner) == PLUS
12039 && CONST_INT_P (XEXP (inner, 1)))
12040 inner = XEXP (inner, 0);
12042 /* Only some unspecs are valid as "constants". */
12043 if (GET_CODE (inner) == UNSPEC)
12044 switch (XINT (inner, 1))
12047 case UNSPEC_GOTOFF:
12048 case UNSPEC_PLTOFF:
12049 return TARGET_64BIT;
12050 case UNSPEC_TPOFF:
12051 x = XVECEXP (inner, 0, 0);
12052 return (GET_CODE (x) == SYMBOL_REF
12053 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
12054 case UNSPEC_MACHOPIC_OFFSET:
12055 return legitimate_pic_address_disp_p (x);
12063 return legitimate_pic_address_disp_p (x);
12070 /* Determine if a given CONST RTX is a valid memory displacement
12071 in PIC mode. */
12074 legitimate_pic_address_disp_p (rtx disp)
12078 /* In 64bit mode we can allow direct addresses of symbols and labels
12079 when they are not dynamic symbols. */
12082 rtx op0 = disp, op1;
12084 switch (GET_CODE (disp))
12090 if (GET_CODE (XEXP (disp, 0)) != PLUS)
12092 op0 = XEXP (XEXP (disp, 0), 0);
12093 op1 = XEXP (XEXP (disp, 0), 1);
12094 if (!CONST_INT_P (op1)
12095 || INTVAL (op1) >= 16*1024*1024
12096 || INTVAL (op1) < -16*1024*1024)
12098 if (GET_CODE (op0) == LABEL_REF)
12100 if (GET_CODE (op0) != SYMBOL_REF)
12105 /* TLS references should always be enclosed in UNSPEC. */
12106 if (SYMBOL_REF_TLS_MODEL (op0))
12108 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
12109 && ix86_cmodel != CM_LARGE_PIC)
12117 if (GET_CODE (disp) != CONST)
12119 disp = XEXP (disp, 0);
12123 /* It is unsafe to allow PLUS expressions here; that would stretch the
12124 allowed distance of GOT tables. We should not need these anyway. */
12125 if (GET_CODE (disp) != UNSPEC
12126 || (XINT (disp, 1) != UNSPEC_GOTPCREL
12127 && XINT (disp, 1) != UNSPEC_GOTOFF
12128 && XINT (disp, 1) != UNSPEC_PCREL
12129 && XINT (disp, 1) != UNSPEC_PLTOFF))
12132 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
12133 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
12139 if (GET_CODE (disp) == PLUS)
12141 if (!CONST_INT_P (XEXP (disp, 1)))
12143 disp = XEXP (disp, 0);
12147 if (TARGET_MACHO && darwin_local_data_pic (disp))
12150 if (GET_CODE (disp) != UNSPEC)
12153 switch (XINT (disp, 1))
12158 /* We need to check for both symbols and labels because VxWorks loads
12159 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
12160 details. */
12161 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
12162 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
12163 case UNSPEC_GOTOFF:
12164 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
12165 While the ABI also specifies a 32bit relocation, we don't produce it in
12166 the small PIC model at all. */
12167 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
12168 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
12170 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
12172 case UNSPEC_GOTTPOFF:
12173 case UNSPEC_GOTNTPOFF:
12174 case UNSPEC_INDNTPOFF:
12177 disp = XVECEXP (disp, 0, 0);
12178 return (GET_CODE (disp) == SYMBOL_REF
12179 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
12180 case UNSPEC_NTPOFF:
12181 disp = XVECEXP (disp, 0, 0);
12182 return (GET_CODE (disp) == SYMBOL_REF
12183 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
12184 case UNSPEC_DTPOFF:
12185 disp = XVECEXP (disp, 0, 0);
12186 return (GET_CODE (disp) == SYMBOL_REF
12187 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
12193 /* Recognizes RTL expressions that are valid memory addresses for an
12194 instruction. The MODE argument is the machine mode for the MEM
12195 expression that wants to use this address.
12197 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
12198 convert common non-canonical forms to canonical form so that they will
12199 be recognized. */
12202 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
12203 rtx addr, bool strict)
12205 struct ix86_address parts;
12206 rtx base, index, disp;
12207 HOST_WIDE_INT scale;
12209 if (ix86_decompose_address (addr, &parts) <= 0)
12210 /* Decomposition failed. */
12214 index = parts.index;
12216 scale = parts.scale;
12218 /* Validate base register.
12220 Don't allow SUBREG's that span more than a word here. It can lead to spill
12221 failures when the base is one word out of a two word structure, which is
12222 represented internally as a DImode int. */
12230 else if (GET_CODE (base) == SUBREG
12231 && REG_P (SUBREG_REG (base))
12232 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
12234 reg = SUBREG_REG (base);
12236 /* Base is not a register. */
12239 if (GET_MODE (base) != Pmode)
12240 /* Base is not in Pmode. */
12243 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
12244 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
12245 /* Base is not valid. */
12249 /* Validate index register.
12251 Don't allow SUBREG's that span more than a word here -- same as above. */
12259 else if (GET_CODE (index) == SUBREG
12260 && REG_P (SUBREG_REG (index))
12261 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
12263 reg = SUBREG_REG (index);
12265 /* Index is not a register. */
12268 if (GET_MODE (index) != Pmode)
12269 /* Index is not in Pmode. */
12272 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
12273 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
12274 /* Index is not valid. */
12278 /* Validate scale factor. */
12282 /* Scale without index. */
12285 if (scale != 2 && scale != 4 && scale != 8)
12286 /* Scale is not a valid multiplier. */
12290 /* Validate displacement. */
12293 if (GET_CODE (disp) == CONST
12294 && GET_CODE (XEXP (disp, 0)) == UNSPEC
12295 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
12296 switch (XINT (XEXP (disp, 0), 1))
12298 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
12299 used. While the ABI also specifies 32bit relocations, we don't produce
12300 them at all and use IP-relative addressing instead. */
12302 case UNSPEC_GOTOFF:
12303 gcc_assert (flag_pic);
12305 goto is_legitimate_pic;
12307 /* 64bit address unspec. */
12310 case UNSPEC_GOTPCREL:
12312 gcc_assert (flag_pic);
12313 goto is_legitimate_pic;
12315 case UNSPEC_GOTTPOFF:
12316 case UNSPEC_GOTNTPOFF:
12317 case UNSPEC_INDNTPOFF:
12318 case UNSPEC_NTPOFF:
12319 case UNSPEC_DTPOFF:
12322 case UNSPEC_STACK_CHECK:
12323 gcc_assert (flag_split_stack);
12327 /* Invalid address unspec. */
12331 else if (SYMBOLIC_CONST (disp)
12335 && MACHOPIC_INDIRECT
12336 && !machopic_operand_p (disp)
12342 if (TARGET_64BIT && (index || base))
12344 /* foo@dtpoff(%rX) is ok. */
12345 if (GET_CODE (disp) != CONST
12346 || GET_CODE (XEXP (disp, 0)) != PLUS
12347 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
12348 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
12349 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
12350 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
12351 /* Non-constant pic memory reference. */
12354 else if ((!TARGET_MACHO || flag_pic)
12355 && ! legitimate_pic_address_disp_p (disp))
12356 /* Displacement is an invalid pic construct. */
12359 else if (MACHO_DYNAMIC_NO_PIC_P
12360 && !ix86_legitimate_constant_p (Pmode, disp))
12361 /* Displacement must be referenced via non_lazy_pointer. */
12365 /* This code used to verify that a symbolic pic displacement
12366 includes the pic_offset_table_rtx register.
12368 While this is a good idea, unfortunately these constructs may
12369 be created by the "adds using lea" optimization for incorrect
12378 This code is nonsensical, but results in addressing the
12379 GOT table with pic_offset_table_rtx as the base. We can't
12380 just refuse it easily, since it gets matched by the
12381 "addsi3" pattern, which later gets split to lea in case
12382 the output register differs from the input. While this
12383 could be handled by a separate addsi pattern for this case
12384 that never results in lea, disabling this test seems to be
12385 the easier and correct fix for the crash. */
12387 else if (GET_CODE (disp) != LABEL_REF
12388 && !CONST_INT_P (disp)
12389 && (GET_CODE (disp) != CONST
12390 || !ix86_legitimate_constant_p (Pmode, disp))
12391 && (GET_CODE (disp) != SYMBOL_REF
12392 || !ix86_legitimate_constant_p (Pmode, disp)))
12393 /* Displacement is not constant. */
12395 else if (TARGET_64BIT
12396 && !x86_64_immediate_operand (disp, VOIDmode))
12397 /* Displacement is out of range. */
12401 /* Everything looks valid. */
12405 /* Determine if a given RTX is a valid constant address. */
12408 constant_address_p (rtx x)
12410 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
12413 /* Return a unique alias set for the GOT. */
12415 static alias_set_type
12416 ix86_GOT_alias_set (void)
12418 static alias_set_type set = -1;
12420 set = new_alias_set ();
12424 /* Return a legitimate reference for ORIG (an address) using the
12425 register REG. If REG is 0, a new pseudo is generated.
12427 There are two types of references that must be handled:
12429 1. Global data references must load the address from the GOT, via
12430 the PIC reg. An insn is emitted to do this load, and the reg is
12431 returned.
12433 2. Static data references, constant pool addresses, and code labels
12434 compute the address as an offset from the GOT, whose base is in
12435 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
12436 differentiate them from global data objects. The returned
12437 address is the PIC reg + an unspec constant.
12439 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
12440 reg also appears in the address. */
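/* For illustration, on 32-bit ELF (assuming %ebx holds the PIC base)
   the two cases typically assemble as

       movl  foo@GOT(%ebx), %reg     ; 1. load &foo from the GOT
       leal  foo@GOTOFF(%ebx), %reg  ; 2. PIC base + constant offset

   The exact sequence depends on the target checks below; this is only
   a sketch.  */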
12443 legitimize_pic_address (rtx orig, rtx reg)
12446 rtx new_rtx = orig;
12450 if (TARGET_MACHO && !TARGET_64BIT)
12453 reg = gen_reg_rtx (Pmode);
12454 /* Use the generic Mach-O PIC machinery. */
12455 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
12459 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
12461 else if (TARGET_64BIT
12462 && ix86_cmodel != CM_SMALL_PIC
12463 && gotoff_operand (addr, Pmode))
12466 /* This symbol may be referenced via a displacement from the PIC
12467 base address (@GOTOFF). */
12469 if (reload_in_progress)
12470 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12471 if (GET_CODE (addr) == CONST)
12472 addr = XEXP (addr, 0);
12473 if (GET_CODE (addr) == PLUS)
12475 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12477 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12480 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12481 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12483 tmpreg = gen_reg_rtx (Pmode);
12486 emit_move_insn (tmpreg, new_rtx);
12490 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
12491 tmpreg, 1, OPTAB_DIRECT);
12494 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
12496 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
12498 /* This symbol may be referenced via a displacement from the PIC
12499 base address (@GOTOFF). */
12501 if (reload_in_progress)
12502 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12503 if (GET_CODE (addr) == CONST)
12504 addr = XEXP (addr, 0);
12505 if (GET_CODE (addr) == PLUS)
12507 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12509 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12512 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12513 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12514 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12518 emit_move_insn (reg, new_rtx);
12522 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
12523 /* We can't use @GOTOFF for text labels on VxWorks;
12524 see gotoff_operand. */
12525 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
12527 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12529 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
12530 return legitimize_dllimport_symbol (addr, true);
12531 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
12532 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
12533 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
12535 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
12536 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
12540 /* For x64 PE-COFF there is no GOT table. So we use the address
12541 directly. */
12542 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
12544 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_PCREL);
12545 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12548 reg = gen_reg_rtx (Pmode);
12549 emit_move_insn (reg, new_rtx);
12552 else if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
12554 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
12555 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12556 new_rtx = gen_const_mem (Pmode, new_rtx);
12557 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12560 reg = gen_reg_rtx (Pmode);
12561 /* Use gen_movsi directly; otherwise the address is loaded
12562 into a register for CSE. We don't want to CSE these addresses;
12563 instead we CSE addresses loaded from the GOT table, so skip this. */
12564 emit_insn (gen_movsi (reg, new_rtx));
12569 /* This symbol must be referenced via a load from the
12570 Global Offset Table (@GOT). */
12572 if (reload_in_progress)
12573 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12574 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
12575 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12577 new_rtx = force_reg (Pmode, new_rtx);
12578 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12579 new_rtx = gen_const_mem (Pmode, new_rtx);
12580 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12583 reg = gen_reg_rtx (Pmode);
12584 emit_move_insn (reg, new_rtx);
12590 if (CONST_INT_P (addr)
12591 && !x86_64_immediate_operand (addr, VOIDmode))
12595 emit_move_insn (reg, addr);
12599 new_rtx = force_reg (Pmode, addr);
12601 else if (GET_CODE (addr) == CONST)
12603 addr = XEXP (addr, 0);
12605 /* We must match stuff we generated before. Assume the only
12606 unspecs that can get here are ours; not that we could do
12607 anything with them anyway.... */
12608 if (GET_CODE (addr) == UNSPEC
12609 || (GET_CODE (addr) == PLUS
12610 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
12612 gcc_assert (GET_CODE (addr) == PLUS);
12614 if (GET_CODE (addr) == PLUS)
12616 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
12618 /* Check first to see if this is a constant offset from a @GOTOFF
12619 symbol reference. */
12620 if (gotoff_operand (op0, Pmode)
12621 && CONST_INT_P (op1))
12625 if (reload_in_progress)
12626 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12627 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
12629 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
12630 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12631 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12635 emit_move_insn (reg, new_rtx);
12641 if (INTVAL (op1) < -16*1024*1024
12642 || INTVAL (op1) >= 16*1024*1024)
12644 if (!x86_64_immediate_operand (op1, Pmode))
12645 op1 = force_reg (Pmode, op1);
12646 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
12652 base = legitimize_pic_address (XEXP (addr, 0), reg);
12653 new_rtx = legitimize_pic_address (XEXP (addr, 1),
12654 base == reg ? NULL_RTX : reg);
12656 if (CONST_INT_P (new_rtx))
12657 new_rtx = plus_constant (base, INTVAL (new_rtx));
12660 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
12662 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
12663 new_rtx = XEXP (new_rtx, 1);
12665 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
12673 /* Load the thread pointer. If TO_REG is true, force it into a register. */
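/* For reference: on GNU/Linux targets the thread pointer is the TLS
   segment base, i.e. %gs:0 in 32-bit mode and %fs:0 in 64-bit mode.  */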
12676 get_thread_pointer (int to_reg)
12680 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
12684 reg = gen_reg_rtx (Pmode);
12685 insn = gen_rtx_SET (VOIDmode, reg, tp);
12686 insn = emit_insn (insn);
12691 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
12692 false if we expect this to be used for a memory address and true if
12693 we expect to load the address into a register. */
12696 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
12698 rtx dest, base, off, pic, tp;
12703 case TLS_MODEL_GLOBAL_DYNAMIC:
12704 dest = gen_reg_rtx (Pmode);
12705 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
12707 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
12709 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
12712 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
12713 insns = get_insns ();
12716 RTL_CONST_CALL_P (insns) = 1;
12717 emit_libcall_block (insns, dest, rax, x);
12719 else if (TARGET_64BIT && TARGET_GNU2_TLS)
12720 emit_insn (gen_tls_global_dynamic_64 (dest, x));
12722 emit_insn (gen_tls_global_dynamic_32 (dest, x));
12724 if (TARGET_GNU2_TLS)
12726 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
12728 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
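/* For illustration, the 64-bit non-GNU2 global-dynamic case above emits
   roughly

       leaq  x@tlsgd(%rip), %rdi
       call  __tls_get_addr@PLT

   with the address of X returned in %rax; this is a sketch, the real
   pattern also emits alignment padding.  */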
12732 case TLS_MODEL_LOCAL_DYNAMIC:
12733 base = gen_reg_rtx (Pmode);
12734 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
12736 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
12738 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
12741 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
12742 insns = get_insns ();
12745 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
12746 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
12747 RTL_CONST_CALL_P (insns) = 1;
12748 emit_libcall_block (insns, base, rax, note);
12750 else if (TARGET_64BIT && TARGET_GNU2_TLS)
12751 emit_insn (gen_tls_local_dynamic_base_64 (base));
12753 emit_insn (gen_tls_local_dynamic_base_32 (base));
12755 if (TARGET_GNU2_TLS)
12757 rtx x = ix86_tls_module_base ();
12759 set_unique_reg_note (get_last_insn (), REG_EQUIV,
12760 gen_rtx_MINUS (Pmode, x, tp));
12763 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
12764 off = gen_rtx_CONST (Pmode, off);
12766 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
12768 if (TARGET_GNU2_TLS)
12770 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
12772 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
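/* For illustration, 64-bit local-dynamic computes the module base once,
   roughly

       leaq  x@tlsld(%rip), %rdi
       call  __tls_get_addr@PLT

   and then addresses each variable at x@dtpoff from that base.  */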
12777 case TLS_MODEL_INITIAL_EXEC:
12780 if (TARGET_SUN_TLS)
12782 /* The Sun linker took the AMD64 TLS spec literally
12783 and can only handle %rax as the destination of the
12784 initial-exec code sequence. */
12786 dest = gen_reg_rtx (Pmode);
12787 emit_insn (gen_tls_initial_exec_64_sun (dest, x));
12792 type = UNSPEC_GOTNTPOFF;
12796 if (reload_in_progress)
12797 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12798 pic = pic_offset_table_rtx;
12799 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
12801 else if (!TARGET_ANY_GNU_TLS)
12803 pic = gen_reg_rtx (Pmode);
12804 emit_insn (gen_set_got (pic));
12805 type = UNSPEC_GOTTPOFF;
12810 type = UNSPEC_INDNTPOFF;
12813 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
12814 off = gen_rtx_CONST (Pmode, off);
12816 off = gen_rtx_PLUS (Pmode, pic, off);
12817 off = gen_const_mem (Pmode, off);
12818 set_mem_alias_set (off, ix86_GOT_alias_set ());
12820 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12822 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12823 off = force_reg (Pmode, off);
12824 return gen_rtx_PLUS (Pmode, base, off);
12828 base = get_thread_pointer (true);
12829 dest = gen_reg_rtx (Pmode);
12830 emit_insn (gen_subsi3 (dest, base, off));
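/* For illustration, 64-bit initial-exec loads the offset from the GOT,
   roughly

       movq  x@gottpoff(%rip), %reg

   and then either adds the thread pointer or, with
   TARGET_TLS_DIRECT_SEG_REFS, accesses %fs:(%reg) directly.  */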
12834 case TLS_MODEL_LOCAL_EXEC:
12835 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
12836 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12837 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
12838 off = gen_rtx_CONST (Pmode, off);
12840 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12842 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12843 return gen_rtx_PLUS (Pmode, base, off);
12847 base = get_thread_pointer (true);
12848 dest = gen_reg_rtx (Pmode);
12849 emit_insn (gen_subsi3 (dest, base, off));
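/* For illustration, local-exec needs no GOT at all; with direct segment
   references the access is simply

       movl  %gs:x@ntpoff, %reg    (32-bit)
       movq  %fs:x@tpoff, %reg     (64-bit)

   otherwise the constant offset is added to the thread pointer.  */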
12854 gcc_unreachable ();
12860 /* Create or return the unique __imp_DECL dllimport symbol corresponding
12861 to symbol DECL. */
12863 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
12864 htab_t dllimport_map;
12867 get_dllimport_decl (tree decl)
12869 struct tree_map *h, in;
12872 const char *prefix;
12873 size_t namelen, prefixlen;
12878 if (!dllimport_map)
12879 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
12881 in.hash = htab_hash_pointer (decl);
12882 in.base.from = decl;
12883 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
12884 h = (struct tree_map *) *loc;
12888 *loc = h = ggc_alloc_tree_map ();
12890 h->base.from = decl;
12891 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
12892 VAR_DECL, NULL, ptr_type_node);
12893 DECL_ARTIFICIAL (to) = 1;
12894 DECL_IGNORED_P (to) = 1;
12895 DECL_EXTERNAL (to) = 1;
12896 TREE_READONLY (to) = 1;
12898 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
12899 name = targetm.strip_name_encoding (name);
12900 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
12901 ? "*__imp_" : "*__imp__";
12902 namelen = strlen (name);
12903 prefixlen = strlen (prefix);
12904 imp_name = (char *) alloca (namelen + prefixlen + 1);
12905 memcpy (imp_name, prefix, prefixlen);
12906 memcpy (imp_name + prefixlen, name, namelen + 1);
12908 name = ggc_alloc_string (imp_name, namelen + prefixlen);
12909 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
12910 SET_SYMBOL_REF_DECL (rtl, to);
12911 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
12913 rtl = gen_const_mem (Pmode, rtl);
12914 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
12916 SET_DECL_RTL (to, rtl);
12917 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
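/* For illustration: for a dllimport'ed symbol "foo" this builds a pointer
   variable named "__imp__foo" (or "__imp_foo" on targets without a user
   label prefix, and for fastcall names); the import library defines that
   pointer to hold foo's address, so every access costs one extra load.  */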
12922 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
12923 true if we require the result be a register. */
12926 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
12931 gcc_assert (SYMBOL_REF_DECL (symbol));
12932 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
12934 x = DECL_RTL (imp_decl);
12936 x = force_reg (Pmode, x);
12940 /* Try machine-dependent ways of modifying an illegitimate address
12941 to be legitimate. If we find one, return the new, valid address.
12942 This macro is used in only one place: `memory_address' in explow.c.
12944 OLDX is the address as it was before break_out_memory_refs was called.
12945 In some cases it is useful to look at this to decide what needs to be done.
12947 It is always safe for this macro to do nothing. It exists to recognize
12948 opportunities to optimize the output.
12950 For the 80386, we handle X+REG by loading X into a register R and
12951 using R+REG. R will go in a general reg and indexing will be used.
12952 However, if REG is a broken-out memory address or multiplication,
12953 nothing needs to be done because REG can certainly go in a general reg.
12955 When -fpic is used, special handling is needed for symbolic references.
12956 See comments by legitimize_pic_address in i386.c for details. */
12959 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
12960 enum machine_mode mode)
12965 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
12967 return legitimize_tls_address (x, (enum tls_model) log, false);
12968 if (GET_CODE (x) == CONST
12969 && GET_CODE (XEXP (x, 0)) == PLUS
12970 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12971 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
12973 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
12974 (enum tls_model) log, false);
12975 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
12978 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12980 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
12981 return legitimize_dllimport_symbol (x, true);
12982 if (GET_CODE (x) == CONST
12983 && GET_CODE (XEXP (x, 0)) == PLUS
12984 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12985 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
12987 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
12988 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
12992 if (flag_pic && SYMBOLIC_CONST (x))
12993 return legitimize_pic_address (x, 0);
12996 if (MACHO_DYNAMIC_NO_PIC_P && SYMBOLIC_CONST (x))
12997 return machopic_indirect_data_reference (x, 0);
13000 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
13001 if (GET_CODE (x) == ASHIFT
13002 && CONST_INT_P (XEXP (x, 1))
13003 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
13006 log = INTVAL (XEXP (x, 1));
13007 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
13008 GEN_INT (1 << log));
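/* E.g. (ashift (reg) (const_int 2)) becomes (mult (reg) (const_int 4)),
   matching the scaled-index form that ix86_decompose_address accepts.  */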
13011 if (GET_CODE (x) == PLUS)
13013 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
13015 if (GET_CODE (XEXP (x, 0)) == ASHIFT
13016 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
13017 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
13020 log = INTVAL (XEXP (XEXP (x, 0), 1));
13021 XEXP (x, 0) = gen_rtx_MULT (Pmode,
13022 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
13023 GEN_INT (1 << log));
13026 if (GET_CODE (XEXP (x, 1)) == ASHIFT
13027 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
13028 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
13031 log = INTVAL (XEXP (XEXP (x, 1), 1));
13032 XEXP (x, 1) = gen_rtx_MULT (Pmode,
13033 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
13034 GEN_INT (1 << log));
13037 /* Put multiply first if it isn't already. */
13038 if (GET_CODE (XEXP (x, 1)) == MULT)
13040 rtx tmp = XEXP (x, 0);
13041 XEXP (x, 0) = XEXP (x, 1);
13046 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
13047 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
13048 created by virtual register instantiation, register elimination, and
13049 similar optimizations. */
13050 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
13053 x = gen_rtx_PLUS (Pmode,
13054 gen_rtx_PLUS (Pmode, XEXP (x, 0),
13055 XEXP (XEXP (x, 1), 0)),
13056 XEXP (XEXP (x, 1), 1));
13060 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
13061 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
13062 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
13063 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
13064 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
13065 && CONSTANT_P (XEXP (x, 1)))
13068 rtx other = NULL_RTX;
13070 if (CONST_INT_P (XEXP (x, 1)))
13072 constant = XEXP (x, 1);
13073 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
13075 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
13077 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
13078 other = XEXP (x, 1);
13086 x = gen_rtx_PLUS (Pmode,
13087 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
13088 XEXP (XEXP (XEXP (x, 0), 1), 0)),
13089 plus_constant (other, INTVAL (constant)));
13093 if (changed && ix86_legitimate_address_p (mode, x, false))
13096 if (GET_CODE (XEXP (x, 0)) == MULT)
13099 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
13102 if (GET_CODE (XEXP (x, 1)) == MULT)
13105 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
13109 && REG_P (XEXP (x, 1))
13110 && REG_P (XEXP (x, 0)))
13113 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
13116 x = legitimize_pic_address (x, 0);
13119 if (changed && ix86_legitimate_address_p (mode, x, false))
13122 if (REG_P (XEXP (x, 0)))
13124 rtx temp = gen_reg_rtx (Pmode);
13125 rtx val = force_operand (XEXP (x, 1), temp);
13127 emit_move_insn (temp, val);
13129 XEXP (x, 1) = temp;
13133 else if (REG_P (XEXP (x, 1)))
13135 rtx temp = gen_reg_rtx (Pmode);
13136 rtx val = force_operand (XEXP (x, 0), temp);
13138 emit_move_insn (temp, val);
13140 XEXP (x, 0) = temp;
13148 /* Print an integer constant expression in assembler syntax. Addition
13149 and subtraction are the only arithmetic that may appear in these
13150 expressions. FILE is the stdio stream to write to, X is the rtx, and
13151 CODE is the operand print code from the output string. */
13154 output_pic_addr_const (FILE *file, rtx x, int code)
13158 switch (GET_CODE (x))
13161 gcc_assert (flag_pic);
13166 if (TARGET_64BIT || ! TARGET_MACHO_BRANCH_ISLANDS)
13167 output_addr_const (file, x);
13170 const char *name = XSTR (x, 0);
13172 /* Mark the decl as referenced so that cgraph will
13173 output the function. */
13174 if (SYMBOL_REF_DECL (x))
13175 mark_decl_referenced (SYMBOL_REF_DECL (x));
13178 if (MACHOPIC_INDIRECT
13179 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
13180 name = machopic_indirection_name (x, /*stub_p=*/true);
13182 assemble_name (file, name);
13184 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
13185 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
13186 fputs ("@PLT", file);
13193 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
13194 assemble_name (asm_out_file, buf);
13198 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
13202 /* This used to output parentheses around the expression,
13203 but that does not work on the 386 (either ATT or BSD assembler). */
13204 output_pic_addr_const (file, XEXP (x, 0), code);
13208 if (GET_MODE (x) == VOIDmode)
13210 /* We can use %d if the number is <32 bits and positive. */
13211 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
13212 fprintf (file, "0x%lx%08lx",
13213 (unsigned long) CONST_DOUBLE_HIGH (x),
13214 (unsigned long) CONST_DOUBLE_LOW (x));
13216 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
13219 /* We can't handle floating point constants;
13220 TARGET_PRINT_OPERAND must handle them. */
13221 output_operand_lossage ("floating constant misused");
13225 /* Some assemblers need integer constants to appear first. */
13226 if (CONST_INT_P (XEXP (x, 0)))
13228 output_pic_addr_const (file, XEXP (x, 0), code);
13230 output_pic_addr_const (file, XEXP (x, 1), code);
13234 gcc_assert (CONST_INT_P (XEXP (x, 1)));
13235 output_pic_addr_const (file, XEXP (x, 1), code);
13237 output_pic_addr_const (file, XEXP (x, 0), code);
13243 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
13244 output_pic_addr_const (file, XEXP (x, 0), code);
13246 output_pic_addr_const (file, XEXP (x, 1), code);
13248 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
13252 if (XINT (x, 1) == UNSPEC_STACK_CHECK)
13254 bool f = i386_asm_output_addr_const_extra (file, x);
13259 gcc_assert (XVECLEN (x, 0) == 1);
13260 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
13261 switch (XINT (x, 1))
13264 fputs ("@GOT", file);
13266 case UNSPEC_GOTOFF:
13267 fputs ("@GOTOFF", file);
13269 case UNSPEC_PLTOFF:
13270 fputs ("@PLTOFF", file);
13273 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13274 "(%rip)" : "[rip]", file);
13276 case UNSPEC_GOTPCREL:
13277 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13278 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
13280 case UNSPEC_GOTTPOFF:
13281 /* FIXME: This might be @TPOFF in Sun ld too. */
13282 fputs ("@gottpoff", file);
13285 fputs ("@tpoff", file);
13287 case UNSPEC_NTPOFF:
13289 fputs ("@tpoff", file);
13291 fputs ("@ntpoff", file);
13293 case UNSPEC_DTPOFF:
13294 fputs ("@dtpoff", file);
13296 case UNSPEC_GOTNTPOFF:
13298 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13299 "@gottpoff(%rip)": "@gottpoff[rip]", file);
13301 fputs ("@gotntpoff", file);
13303 case UNSPEC_INDNTPOFF:
13304 fputs ("@indntpoff", file);
13307 case UNSPEC_MACHOPIC_OFFSET:
13309 machopic_output_function_base_name (file);
13313 output_operand_lossage ("invalid UNSPEC as operand");
13319 output_operand_lossage ("invalid expression as operand");
13323 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
13324 We need to emit DTP-relative relocations. */
13326 static void ATTRIBUTE_UNUSED
13327 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
13329 fputs (ASM_LONG, file);
13330 output_addr_const (file, x);
13331 fputs ("@dtpoff", file);
13337 fputs (", 0", file);
13340 gcc_unreachable ();
13344 /* Return true if X is a representation of the PIC register. This copes
13345 with calls from ix86_find_base_term, where the register might have
13346 been replaced by a cselib value. */
13349 ix86_pic_register_p (rtx x)
13351 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
13352 return (pic_offset_table_rtx
13353 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
13355 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
13358 /* Helper function for ix86_delegitimize_address.
13359 Attempt to delegitimize TLS local-exec accesses. */
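/* For illustration: a local-exec access such as %fs:x@tpoff(%reg) arrives
   here as an address whose displacement wraps (unspec [x] UNSPEC_NTPOFF);
   we strip the unspec and rebuild any base/index arithmetic around the
   bare SYMBOL_REF.  */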
13362 ix86_delegitimize_tls_address (rtx orig_x)
13364 rtx x = orig_x, unspec;
13365 struct ix86_address addr;
13367 if (!TARGET_TLS_DIRECT_SEG_REFS)
13371 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
13373 if (ix86_decompose_address (x, &addr) == 0
13374 || addr.seg != (TARGET_64BIT ? SEG_FS : SEG_GS)
13375 || addr.disp == NULL_RTX
13376 || GET_CODE (addr.disp) != CONST)
13378 unspec = XEXP (addr.disp, 0);
13379 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
13380 unspec = XEXP (unspec, 0);
13381 if (GET_CODE (unspec) != UNSPEC || XINT (unspec, 1) != UNSPEC_NTPOFF)
13383 x = XVECEXP (unspec, 0, 0);
13384 gcc_assert (GET_CODE (x) == SYMBOL_REF);
13385 if (unspec != XEXP (addr.disp, 0))
13386 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.disp, 0), 1));
13389 rtx idx = addr.index;
13390 if (addr.scale != 1)
13391 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
13392 x = gen_rtx_PLUS (Pmode, idx, x);
13395 x = gen_rtx_PLUS (Pmode, addr.base, x);
13396 if (MEM_P (orig_x))
13397 x = replace_equiv_address_nv (orig_x, x);
13401 /* In the name of slightly smaller debug output, and to cater to
13402 general assembler lossage, recognize PIC+GOTOFF and turn it back
13403 into a direct symbol reference.
13405 On Darwin, this is necessary to avoid a crash, because Darwin
13406 has a different PIC label for each routine but the DWARF debugging
13407 information is not associated with any particular routine, so it's
13408 necessary to remove references to the PIC label from RTL stored by
13409 the DWARF output code. */
13412 ix86_delegitimize_address (rtx x)
13414 rtx orig_x = delegitimize_mem_from_attrs (x);
13415 /* addend is NULL or some rtx if x is something+GOTOFF where
13416 something doesn't include the PIC register. */
13417 rtx addend = NULL_RTX;
13418 /* reg_addend is NULL or a multiple of some register. */
13419 rtx reg_addend = NULL_RTX;
13420 /* const_addend is NULL or a const_int. */
13421 rtx const_addend = NULL_RTX;
13422 /* This is the result, or NULL. */
13423 rtx result = NULL_RTX;
13432 if (GET_CODE (x) != CONST
13433 || GET_CODE (XEXP (x, 0)) != UNSPEC
13434 || (XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
13435 && XINT (XEXP (x, 0), 1) != UNSPEC_PCREL)
13436 || !MEM_P (orig_x))
13437 return ix86_delegitimize_tls_address (orig_x);
13438 x = XVECEXP (XEXP (x, 0), 0, 0);
13439 if (GET_MODE (orig_x) != Pmode)
13441 x = simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
13448 if (GET_CODE (x) != PLUS
13449 || GET_CODE (XEXP (x, 1)) != CONST)
13450 return ix86_delegitimize_tls_address (orig_x);
13452 if (ix86_pic_register_p (XEXP (x, 0)))
13453 /* %ebx + GOT/GOTOFF */
13455 else if (GET_CODE (XEXP (x, 0)) == PLUS)
13457 /* %ebx + %reg * scale + GOT/GOTOFF */
13458 reg_addend = XEXP (x, 0);
13459 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
13460 reg_addend = XEXP (reg_addend, 1);
13461 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
13462 reg_addend = XEXP (reg_addend, 0);
13465 reg_addend = NULL_RTX;
13466 addend = XEXP (x, 0);
13470 addend = XEXP (x, 0);
13472 x = XEXP (XEXP (x, 1), 0);
13473 if (GET_CODE (x) == PLUS
13474 && CONST_INT_P (XEXP (x, 1)))
13476 const_addend = XEXP (x, 1);
13480 if (GET_CODE (x) == UNSPEC
13481 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
13482 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
13483 result = XVECEXP (x, 0, 0);
13485 if (TARGET_MACHO && darwin_local_data_pic (x)
13486 && !MEM_P (orig_x))
13487 result = XVECEXP (x, 0, 0);
13490 return ix86_delegitimize_tls_address (orig_x);
13493 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
13495 result = gen_rtx_PLUS (Pmode, reg_addend, result);
13498 /* If the rest of original X doesn't involve the PIC register, add
13499 addend and subtract pic_offset_table_rtx. This can happen e.g.
13501 leal (%ebx, %ecx, 4), %ecx
13503 movl foo@GOTOFF(%ecx), %edx
13504 in which case we return (%ecx - %ebx) + foo. */
13505 if (pic_offset_table_rtx)
13506 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
13507 pic_offset_table_rtx),
13512 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
13514 result = simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
13515 if (result == NULL_RTX)
13521 /* If X is a machine specific address (i.e. a symbol or label being
13522 referenced as a displacement from the GOT implemented using an
13523 UNSPEC), then return the base term. Otherwise return X. */
13526 ix86_find_base_term (rtx x)
13532 if (GET_CODE (x) != CONST)
13534 term = XEXP (x, 0);
13535 if (GET_CODE (term) == PLUS
13536 && (CONST_INT_P (XEXP (term, 1))
13537 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
13538 term = XEXP (term, 0);
13539 if (GET_CODE (term) != UNSPEC
13540 || (XINT (term, 1) != UNSPEC_GOTPCREL
13541 && XINT (term, 1) != UNSPEC_PCREL))
13544 return XVECEXP (term, 0, 0);
13547 return ix86_delegitimize_address (x);
13551 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
13552 int fp, FILE *file)
13554 const char *suffix;
13556 if (mode == CCFPmode || mode == CCFPUmode)
13558 code = ix86_fp_compare_code_to_integer (code);
13562 code = reverse_condition (code);
13613 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
13617 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
13618 Those same assemblers have the same but opposite lossage on cmov. */
13619 if (mode == CCmode)
13620 suffix = fp ? "nbe" : "a";
13621 else if (mode == CCCmode)
13624 gcc_unreachable ();
13640 gcc_unreachable ();
13644 gcc_assert (mode == CCmode || mode == CCCmode);
13661 gcc_unreachable ();
13665 /* ??? As above. */
13666 gcc_assert (mode == CCmode || mode == CCCmode);
13667 suffix = fp ? "nb" : "ae";
13670 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
13674 /* ??? As above. */
13675 if (mode == CCmode)
13677 else if (mode == CCCmode)
13678 suffix = fp ? "nb" : "ae";
13680 gcc_unreachable ();
13683 suffix = fp ? "u" : "p";
13686 suffix = fp ? "nu" : "np";
13689 gcc_unreachable ();
13691 fputs (suffix, file);
13694 /* Print the name of register X to FILE based on its machine mode and number.
13695 If CODE is 'w', pretend the mode is HImode.
13696 If CODE is 'b', pretend the mode is QImode.
13697 If CODE is 'k', pretend the mode is SImode.
13698 If CODE is 'q', pretend the mode is DImode.
13699 If CODE is 'x', pretend the mode is V4SFmode.
13700 If CODE is 't', pretend the mode is V8SFmode.
13701 If CODE is 'h', pretend the reg is the 'high' byte register.
13702 If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.
13703 If CODE is 'd', duplicate the operand for an AVX instruction.
13704 */
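/* For illustration: with code 'd' and TARGET_AVX, "%d0" for xmm0 prints
   "%xmm0, %xmm0", naming the register twice as some AVX templates
   require.  */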
13707 print_reg (rtx x, int code, FILE *file)
13710 bool duplicated = code == 'd' && TARGET_AVX;
13712 gcc_assert (x == pc_rtx
13713 || (REGNO (x) != ARG_POINTER_REGNUM
13714 && REGNO (x) != FRAME_POINTER_REGNUM
13715 && REGNO (x) != FLAGS_REG
13716 && REGNO (x) != FPSR_REG
13717 && REGNO (x) != FPCR_REG));
13719 if (ASSEMBLER_DIALECT == ASM_ATT)
13724 gcc_assert (TARGET_64BIT);
13725 fputs ("rip", file);
13729 if (code == 'w' || MMX_REG_P (x))
13731 else if (code == 'b')
13733 else if (code == 'k')
13735 else if (code == 'q')
13737 else if (code == 'y')
13739 else if (code == 'h')
13741 else if (code == 'x')
13743 else if (code == 't')
13746 code = GET_MODE_SIZE (GET_MODE (x));
13748 /* Irritatingly, AMD extended registers use a different naming convention
13749 from the normal registers. */
13750 if (REX_INT_REG_P (x))
13752 gcc_assert (TARGET_64BIT);
13756 error ("extended registers have no high halves");
13759 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
13762 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
13765 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
13768 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
13771 error ("unsupported operand size for extended register");
13781 if (STACK_TOP_P (x))
13790 if (! ANY_FP_REG_P (x))
13791 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
13796 reg = hi_reg_name[REGNO (x)];
13799 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
13801 reg = qi_reg_name[REGNO (x)];
13804 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
13806 reg = qi_high_reg_name[REGNO (x)];
13811 gcc_assert (!duplicated);
13813 fputs (hi_reg_name[REGNO (x)] + 1, file);
13818 gcc_unreachable ();
13824 if (ASSEMBLER_DIALECT == ASM_ATT)
13825 fprintf (file, ", %%%s", reg);
13827 fprintf (file, ", %s", reg);
13831 /* Locate some local-dynamic symbol still in use by this function
13832 so that we can print its name in some tls_local_dynamic_base
13833 pattern. */
13836 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
13840 if (GET_CODE (x) == SYMBOL_REF
13841 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
13843 cfun->machine->some_ld_name = XSTR (x, 0);
13850 static const char *
13851 get_some_local_dynamic_name (void)
13855 if (cfun->machine->some_ld_name)
13856 return cfun->machine->some_ld_name;
13858 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
13859 if (NONDEBUG_INSN_P (insn)
13860 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
13861 return cfun->machine->some_ld_name;
13866 /* Meaning of CODE:
13867 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
13868 C -- print opcode suffix for set/cmov insn.
13869 c -- like C, but print reversed condition
13870 F,f -- likewise, but for floating-point.
13871 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
13873 R -- print the prefix for register names.
13874 z -- print the opcode suffix for the size of the current operand.
13875 Z -- likewise, with special suffixes for x87 instructions.
13876 * -- print a star (in certain assembler syntax)
13877 A -- print an absolute memory reference.
13878 w -- print the operand as if it's a "word" (HImode) even if it isn't.
13879 s -- print a shift double count, followed by the assembler's argument
13880 delimiter.
13881 b -- print the QImode name of the register for the indicated operand.
13882 %b0 would print %al if operands[0] is reg 0.
13883 w -- likewise, print the HImode name of the register.
13884 k -- likewise, print the SImode name of the register.
13885 q -- likewise, print the DImode name of the register.
13886 x -- likewise, print the V4SFmode name of the register.
13887 t -- likewise, print the V8SFmode name of the register.
13888 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
13889 y -- print "st(0)" instead of "st" as a register.
13890 d -- print duplicated register operand for AVX instruction.
13891 D -- print condition for SSE cmp instruction.
13892 P -- if PIC, print an @PLT suffix.
13893 X -- don't print any sort of PIC '@' suffix for a symbol.
13894 & -- print some in-use local-dynamic symbol name.
13895 H -- print a memory address offset by 8; used for sse high-parts
13896 Y -- print condition for XOP pcom* instruction.
13897 + -- print a branch hint as a 'cs' or 'ds' prefix
13898 ; -- print a semicolon (after prefixes, due to a bug in older gas).
13899 @ -- print the segment register of a thread base pointer load
13900 */
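/* For illustration: in a template such as "add%z0\t{%2, %0|%0, %2}",
   '%z0' expands to the size suffix of operand 0 ('l' for an SImode
   operand in AT&T syntax), and the {att|intel} halves are selected by
   the assembler dialect.  */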
13903 ix86_print_operand (FILE *file, rtx x, int code)
13910 if (ASSEMBLER_DIALECT == ASM_ATT)
13916 const char *name = get_some_local_dynamic_name ();
13918 output_operand_lossage ("'%%&' used without any "
13919 "local dynamic TLS references");
13921 assemble_name (file, name);
13926 switch (ASSEMBLER_DIALECT)
13933 /* Intel syntax. For absolute addresses, registers should not
13934 be surrounded by braces. */
13938 ix86_print_operand (file, x, 0);
13945 gcc_unreachable ();
13948 ix86_print_operand (file, x, 0);
13953 if (ASSEMBLER_DIALECT == ASM_ATT)
13958 if (ASSEMBLER_DIALECT == ASM_ATT)
13963 if (ASSEMBLER_DIALECT == ASM_ATT)
13968 if (ASSEMBLER_DIALECT == ASM_ATT)
13973 if (ASSEMBLER_DIALECT == ASM_ATT)
13978 if (ASSEMBLER_DIALECT == ASM_ATT)
13983 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
13985 /* Opcodes don't get size suffixes if using Intel opcodes. */
13986 if (ASSEMBLER_DIALECT == ASM_INTEL)
13989 switch (GET_MODE_SIZE (GET_MODE (x)))
14008 output_operand_lossage
14009 ("invalid operand size for operand code '%c'", code);
14014 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
14016 (0, "non-integer operand used with operand code '%c'", code);
14020 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
14021 if (ASSEMBLER_DIALECT == ASM_INTEL)
14024 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
14026 switch (GET_MODE_SIZE (GET_MODE (x)))
14029 #ifdef HAVE_AS_IX86_FILDS
14039 #ifdef HAVE_AS_IX86_FILDQ
14042 fputs ("ll", file);
14050 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
14052 /* 387 opcodes don't get size suffixes
14053 if the operands are registers. */
14054 if (STACK_REG_P (x))
14057 switch (GET_MODE_SIZE (GET_MODE (x)))
14078 output_operand_lossage
14079 ("invalid operand type used with operand code '%c'", code);
14083 output_operand_lossage
14084 ("invalid operand size for operand code '%c'", code);
14101 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
14103 ix86_print_operand (file, x, 0);
14104 fputs (", ", file);
14109 /* A little bit of braindamage here. The SSE compare instructions
14110 use completely different names for the comparisons than the
14111 fp conditional moves do. */
14114 switch (GET_CODE (x))
14117 fputs ("eq", file);
14120 fputs ("eq_us", file);
14123 fputs ("lt", file);
14126 fputs ("nge", file);
14129 fputs ("le", file);
14132 fputs ("ngt", file);
14135 fputs ("unord", file);
14138 fputs ("neq", file);
14141 fputs ("neq_oq", file);
14144 fputs ("ge", file);
14147 fputs ("nlt", file);
14150 fputs ("gt", file);
14153 fputs ("nle", file);
14156 fputs ("ord", file);
14159 output_operand_lossage ("operand is not a condition code, "
14160 "invalid operand code 'D'");
14166 switch (GET_CODE (x))
14170 fputs ("eq", file);
14174 fputs ("lt", file);
14178 fputs ("le", file);
14181 fputs ("unord", file);
14185 fputs ("neq", file);
14189 fputs ("nlt", file);
14193 fputs ("nle", file);
14196 fputs ("ord", file);
14199 output_operand_lossage ("operand is not a condition code, "
14200 "invalid operand code 'D'");
14206 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14207 if (ASSEMBLER_DIALECT == ASM_ATT)
14209 switch (GET_MODE (x))
14211 case HImode: putc ('w', file); break;
14213 case SFmode: putc ('l', file); break;
14215 case DFmode: putc ('q', file); break;
14216 default: gcc_unreachable ();
14223 if (!COMPARISON_P (x))
14225 output_operand_lossage ("operand is neither a constant nor a "
14226 "condition code, invalid operand code "
14230 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
14233 if (!COMPARISON_P (x))
14235 output_operand_lossage ("operand is neither a constant nor a "
14236 "condition code, invalid operand code "
14240 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14241 if (ASSEMBLER_DIALECT == ASM_ATT)
14244 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
14247 /* Like above, but reverse condition */
14249 /* Check to see if argument to %c is really a constant
14250 and not a condition code which needs to be reversed. */
14251 if (!COMPARISON_P (x))
14253 output_operand_lossage ("operand is neither a constant nor a "
14254 "condition code, invalid operand "
14258 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
14261 if (!COMPARISON_P (x))
14263 output_operand_lossage ("operand is neither a constant nor a "
14264 "condition code, invalid operand "
14268 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14269 if (ASSEMBLER_DIALECT == ASM_ATT)
14272 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
14276 /* It doesn't actually matter what mode we use here, as we're
14277 only going to use this for printing. */
14278 x = adjust_address_nv (x, DImode, 8);
14286 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
14289 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
14292 int pred_val = INTVAL (XEXP (x, 0));
14294 if (pred_val < REG_BR_PROB_BASE * 45 / 100
14295 || pred_val > REG_BR_PROB_BASE * 55 / 100)
14297 int taken = pred_val > REG_BR_PROB_BASE / 2;
14298 int cputaken = final_forward_branch_p (current_output_insn) == 0;
14300 /* Emit hints only in cases where the default branch prediction
14301 heuristics would fail. */
14302 if (taken != cputaken)
14304 /* We use 3e (DS) prefix for taken branches and
14305 2e (CS) prefix for not taken branches. */
14307 fputs ("ds ; ", file);
14309 fputs ("cs ; ", file);
14317 switch (GET_CODE (x))
14320 fputs ("neq", file);
14323 fputs ("eq", file);
14327 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
14331 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
14335 fputs ("le", file);
14339 fputs ("lt", file);
14342 fputs ("unord", file);
14345 fputs ("ord", file);
14348 fputs ("ueq", file);
14351 fputs ("nlt", file);
14354 fputs ("nle", file);
14357 fputs ("ule", file);
14360 fputs ("ult", file);
14363 fputs ("une", file);
14366 output_operand_lossage ("operand is not a condition code, "
14367 "invalid operand code 'Y'");
14373 #ifndef HAVE_AS_IX86_REP_LOCK_PREFIX
14379 if (ASSEMBLER_DIALECT == ASM_ATT)
14382 /* The kernel uses a different segment register for performance
14383 reasons; a system call would not have to trash the userspace
14384 segment register, which would be expensive. */
14385 if (TARGET_64BIT && ix86_cmodel != CM_KERNEL)
14386 fputs ("fs", file);
14388 fputs ("gs", file);
14392 output_operand_lossage ("invalid operand code '%c'", code);
14397 print_reg (x, code, file);
14399 else if (MEM_P (x))
14401 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
14402 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
14403 && GET_MODE (x) != BLKmode)
14406 switch (GET_MODE_SIZE (GET_MODE (x)))
14408 case 1: size = "BYTE"; break;
14409 case 2: size = "WORD"; break;
14410 case 4: size = "DWORD"; break;
14411 case 8: size = "QWORD"; break;
14412 case 12: size = "TBYTE"; break;
14414 if (GET_MODE (x) == XFmode)
14419 case 32: size = "YMMWORD"; break;
14421 gcc_unreachable ();
14424 /* Check for explicit size override (codes 'b', 'w' and 'k') */
14427 else if (code == 'w')
14429 else if (code == 'k')
14432 fputs (size, file);
14433 fputs (" PTR ", file);
14437 /* Avoid (%rip) for call operands. */
14438 if (CONSTANT_ADDRESS_P (x) && code == 'P'
14439 && !CONST_INT_P (x))
14440 output_addr_const (file, x);
14441 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
14442 output_operand_lossage ("invalid constraints for operand");
14444 output_address (x);
14447 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
14452 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14453 REAL_VALUE_TO_TARGET_SINGLE (r, l);
14455 if (ASSEMBLER_DIALECT == ASM_ATT)
14457 /* Sign extend 32bit SFmode immediate to 8 bytes. */
14459 fprintf (file, "0x%08llx", (unsigned long long) (int) l);
14461 fprintf (file, "0x%08x", (unsigned int) l);
14464 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
14469 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14470 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
14472 if (ASSEMBLER_DIALECT == ASM_ATT)
14474 fprintf (file, "0x%lx%08lx", l[1] & 0xffffffff, l[0] & 0xffffffff);
14477 /* These float cases don't actually occur as immediate operands. */
14478 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
14482 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
14483 fputs (dstr, file);
14488 /* We have patterns that allow zero sets of memory, for instance.
14489 In 64-bit mode, we should probably support all 8-byte vectors,
14490 since we can in fact encode that into an immediate. */
14491 if (GET_CODE (x) == CONST_VECTOR)
14493 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
14499 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
14501 if (ASSEMBLER_DIALECT == ASM_ATT)
14504 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
14505 || GET_CODE (x) == LABEL_REF)
14507 if (ASSEMBLER_DIALECT == ASM_ATT)
14510 fputs ("OFFSET FLAT:", file);
14513 if (CONST_INT_P (x))
14514 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
14515 else if (flag_pic || MACHOPIC_INDIRECT)
14516 output_pic_addr_const (file, x, code);
14518 output_addr_const (file, x);
14523 ix86_print_operand_punct_valid_p (unsigned char code)
14525 return (code == '@' || code == '*' || code == '+'
14526 || code == '&' || code == ';');
14529 /* Print a memory operand whose address is ADDR. */
14532 ix86_print_operand_address (FILE *file, rtx addr)
14534 struct ix86_address parts;
14535 rtx base, index, disp;
14537 int ok = ix86_decompose_address (addr, &parts);
14542 index = parts.index;
14544 scale = parts.scale;
14552 if (ASSEMBLER_DIALECT == ASM_ATT)
14554 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
14557 gcc_unreachable ();
14560 /* Use the one byte shorter RIP-relative addressing for 64bit mode. */
14561 if (TARGET_64BIT && !base && !index)
14565 if (GET_CODE (disp) == CONST
14566 && GET_CODE (XEXP (disp, 0)) == PLUS
14567 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14568 symbol = XEXP (XEXP (disp, 0), 0);
14570 if (GET_CODE (symbol) == LABEL_REF
14571 || (GET_CODE (symbol) == SYMBOL_REF
14572 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
14575 if (!base && !index)
14577 /* Displacement only requires special attention. */
14579 if (CONST_INT_P (disp))
14581 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
14582 fputs ("ds:", file);
14583 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
14586 output_pic_addr_const (file, disp, 0);
14588 output_addr_const (file, disp);
14592 if (ASSEMBLER_DIALECT == ASM_ATT)
14597 output_pic_addr_const (file, disp, 0);
14598 else if (GET_CODE (disp) == LABEL_REF)
14599 output_asm_label (disp);
14601 output_addr_const (file, disp);
14606 print_reg (base, 0, file);
14610 print_reg (index, 0, file);
14612 fprintf (file, ",%d", scale);
14618 rtx offset = NULL_RTX;
14622 /* Pull out the offset of a symbol; print any symbol itself. */
14623 if (GET_CODE (disp) == CONST
14624 && GET_CODE (XEXP (disp, 0)) == PLUS
14625 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14627 offset = XEXP (XEXP (disp, 0), 1);
14628 disp = gen_rtx_CONST (VOIDmode,
14629 XEXP (XEXP (disp, 0), 0));
14633 output_pic_addr_const (file, disp, 0);
14634 else if (GET_CODE (disp) == LABEL_REF)
14635 output_asm_label (disp);
14636 else if (CONST_INT_P (disp))
14639 output_addr_const (file, disp);
14645 print_reg (base, 0, file);
14648 if (INTVAL (offset) >= 0)
14650 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14654 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14661 print_reg (index, 0, file);
14663 fprintf (file, "*%d", scale);
14670 /* Implementation of TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
14673 i386_asm_output_addr_const_extra (FILE *file, rtx x)
14677 if (GET_CODE (x) != UNSPEC)
14680 op = XVECEXP (x, 0, 0);
14681 switch (XINT (x, 1))
14683 case UNSPEC_GOTTPOFF:
14684 output_addr_const (file, op);
14685 /* FIXME: This might be @TPOFF in Sun ld. */
14686 fputs ("@gottpoff", file);
14689 output_addr_const (file, op);
14690 fputs ("@tpoff", file);
14692 case UNSPEC_NTPOFF:
14693 output_addr_const (file, op);
14695 fputs ("@tpoff", file);
14697 fputs ("@ntpoff", file);
14699 case UNSPEC_DTPOFF:
14700 output_addr_const (file, op);
14701 fputs ("@dtpoff", file);
14703 case UNSPEC_GOTNTPOFF:
14704 output_addr_const (file, op);
14706 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
14707 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
14709 fputs ("@gotntpoff", file);
14711 case UNSPEC_INDNTPOFF:
14712 output_addr_const (file, op);
14713 fputs ("@indntpoff", file);
14716 case UNSPEC_MACHOPIC_OFFSET:
14717 output_addr_const (file, op);
14719 machopic_output_function_base_name (file);
14723 case UNSPEC_STACK_CHECK:
14727 gcc_assert (flag_split_stack);
14729 #ifdef TARGET_THREAD_SPLIT_STACK_OFFSET
14730 offset = TARGET_THREAD_SPLIT_STACK_OFFSET;
14732 gcc_unreachable ();
14735 fprintf (file, "%s:%d", TARGET_64BIT ? "%fs" : "%gs", offset);
14746 /* Split one or more double-mode RTL references into pairs of half-mode
14747 references. The RTL can be REG, offsettable MEM, integer constant, or
14748 CONST_DOUBLE. "operands" is a pointer to an array of double-mode RTLs to
14749 split and "num" is its length. lo_half and hi_half are output arrays
14750 that parallel "operands". */
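/* For illustration: splitting a DImode register on a 32-bit target yields
   lo_half = (subreg:SI (reg:DI) 0) and hi_half = (subreg:SI (reg:DI) 4);
   for an offsettable MEM the halves are the MEM adjusted by 0 and 4.  */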
14753 split_double_mode (enum machine_mode mode, rtx operands[],
14754 int num, rtx lo_half[], rtx hi_half[])
14756 enum machine_mode half_mode;
14762 half_mode = DImode;
14765 half_mode = SImode;
14768 gcc_unreachable ();
14771 byte = GET_MODE_SIZE (half_mode);
14775 rtx op = operands[num];
14777 /* simplify_subreg refuses to split volatile memory references,
14778 but we still have to handle them. */
14781 lo_half[num] = adjust_address (op, half_mode, 0);
14782 hi_half[num] = adjust_address (op, half_mode, byte);
14786 lo_half[num] = simplify_gen_subreg (half_mode, op,
14787 GET_MODE (op) == VOIDmode
14788 ? mode : GET_MODE (op), 0);
14789 hi_half[num] = simplify_gen_subreg (half_mode, op,
14790 GET_MODE (op) == VOIDmode
14791 ? mode : GET_MODE (op), byte);
14796 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
14797 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
14798 is the expression of the binary operation. The output may either be
14799 emitted here, or returned to the caller, like all output_* functions.
14801 There is no guarantee that the operands are the same mode, as they
14802 might be within FLOAT or FLOAT_EXTEND expressions. */
14804 #ifndef SYSV386_COMPAT
14805 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
14806 wants to fix the assemblers because that causes incompatibility
14807 with gcc. No-one wants to fix gcc because that causes
14808 incompatibility with assemblers... You can use the option of
14809 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
14810 #define SYSV386_COMPAT 1
14814 output_387_binary_op (rtx insn, rtx *operands)
14816 static char buf[40];
14819 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
14821 #ifdef ENABLE_CHECKING
14822 /* Even if we do not want to check the inputs, this documents the
14823 input constraints, which helps in understanding the following code. */
14824 if (STACK_REG_P (operands[0])
14825 && ((REG_P (operands[1])
14826 && REGNO (operands[0]) == REGNO (operands[1])
14827 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
14828 || (REG_P (operands[2])
14829 && REGNO (operands[0]) == REGNO (operands[2])
14830 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
14831 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
14834 gcc_assert (is_sse);
14837 switch (GET_CODE (operands[3]))
14840 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14841 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14849 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14850 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14858 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14859 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14867 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14868 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14876 gcc_unreachable ();
14883 strcpy (buf, ssep);
14884 if (GET_MODE (operands[0]) == SFmode)
14885 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
14887 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
14891 strcpy (buf, ssep + 1);
14892 if (GET_MODE (operands[0]) == SFmode)
14893 strcat (buf, "ss\t{%2, %0|%0, %2}");
14895 strcat (buf, "sd\t{%2, %0|%0, %2}");
14901 switch (GET_CODE (operands[3]))
14905 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
14907 rtx temp = operands[2];
14908 operands[2] = operands[1];
14909 operands[1] = temp;
14912 /* Now we know operands[0] == operands[1]. */
14914 if (MEM_P (operands[2]))
14920 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14922 if (STACK_TOP_P (operands[0]))
14923 /* How is it that we are storing to a dead operand[2]?
14924 Well, presumably operands[1] is dead too. We can't
14925 store the result to st(0) as st(0) gets popped on this
14926 instruction. Instead store to operands[2] (which I
14927 think has to be st(1)). st(1) will be popped later.
14928 gcc <= 2.8.1 didn't have this check and generated
14929 assembly code that the Unixware assembler rejected. */
14930 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14932 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14936 if (STACK_TOP_P (operands[0]))
14937 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14939 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
14944 if (MEM_P (operands[1]))
14950 if (MEM_P (operands[2]))
14956 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14959 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
14960 derived assemblers, confusingly reverse the direction of
14961 the operation for fsub{r} and fdiv{r} when the
14962 destination register is not st(0). The Intel assembler
14963 doesn't have this brain damage. Read !SYSV386_COMPAT to
14964 figure out what the hardware really does. */
14965 if (STACK_TOP_P (operands[0]))
14966 p = "{p\t%0, %2|rp\t%2, %0}";
14968 p = "{rp\t%2, %0|p\t%0, %2}";
14970 if (STACK_TOP_P (operands[0]))
14971 /* As above for fmul/fadd, we can't store to st(0). */
14972 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14974 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14979 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
14982 if (STACK_TOP_P (operands[0]))
14983 p = "{rp\t%0, %1|p\t%1, %0}";
14985 p = "{p\t%1, %0|rp\t%0, %1}";
14987 if (STACK_TOP_P (operands[0]))
14988 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
14990 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
14995 if (STACK_TOP_P (operands[0]))
14997 if (STACK_TOP_P (operands[1]))
14998 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
15000 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
15003 else if (STACK_TOP_P (operands[1]))
15006 p = "{\t%1, %0|r\t%0, %1}";
15008 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
15014 p = "{r\t%2, %0|\t%0, %2}";
15016 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
15022 gcc_unreachable ();
15029 /* Return needed mode for entity in optimize_mode_switching pass. */
15032 ix86_mode_needed (int entity, rtx insn)
15034 enum attr_i387_cw mode;
15036 /* The mode UNINITIALIZED is used to store the control word after a
15037 function call or ASM pattern. The mode ANY specifies that the function
15038 has no requirements on the control word and makes no changes to the
15039 bits we are interested in. */
15042 || (NONJUMP_INSN_P (insn)
15043 && (asm_noperands (PATTERN (insn)) >= 0
15044 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
15045 return I387_CW_UNINITIALIZED;
15047 if (recog_memoized (insn) < 0)
15048 return I387_CW_ANY;
15050 mode = get_attr_i387_cw (insn);
15055 if (mode == I387_CW_TRUNC)
15060 if (mode == I387_CW_FLOOR)
15065 if (mode == I387_CW_CEIL)
15070 if (mode == I387_CW_MASK_PM)
15075 gcc_unreachable ();
15078 return I387_CW_ANY;
15081 /* Output code to initialize the control word copies used by trunc?f?i and
15082 rounding patterns. CURRENT_MODE is set to the current control word,
15083 while NEW_MODE is set to the new control word. */
15086 emit_i387_cw_initialization (int mode)
15088 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
15091 enum ix86_stack_slot slot;
15093 rtx reg = gen_reg_rtx (HImode);
15095 emit_insn (gen_x86_fnstcw_1 (stored_mode));
15096 emit_move_insn (reg, copy_rtx (stored_mode));
15098 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
15099 || optimize_function_for_size_p (cfun))
15103 case I387_CW_TRUNC:
15104 /* round toward zero (truncate) */
15105 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
15106 slot = SLOT_CW_TRUNC;
15109 case I387_CW_FLOOR:
15110 /* round down toward -oo */
15111 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
15112 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
15113 slot = SLOT_CW_FLOOR;
15117 /* round up toward +oo */
15118 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
15119 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
15120 slot = SLOT_CW_CEIL;
15123 case I387_CW_MASK_PM:
15124 /* mask precision exception for nearbyint() */
15125 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
15126 slot = SLOT_CW_MASK_PM;
15130 gcc_unreachable ();
15137 case I387_CW_TRUNC:
15138 /* round toward zero (truncate) */
15139 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
15140 slot = SLOT_CW_TRUNC;
15143 case I387_CW_FLOOR:
15144 /* round down toward -oo */
15145 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
15146 slot = SLOT_CW_FLOOR;
15150 /* round up toward +oo */
15151 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
15152 slot = SLOT_CW_CEIL;
15155 case I387_CW_MASK_PM:
15156 /* mask precision exception for nearbyint() */
15157 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
15158 slot = SLOT_CW_MASK_PM;
15162 gcc_unreachable ();
15166 gcc_assert (slot < MAX_386_STACK_LOCALS);
15168 new_mode = assign_386_stack_local (HImode, slot);
15169 emit_move_insn (new_mode, reg);
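/* Editor's illustration (not part of GCC): the x87 rounding mode lives in
   bits 10-11 of the control word -- 00 = nearest, 01 = down, 10 = up,
   11 = truncate -- and bit 5 masks the precision exception, which is why
   the sequences above OR in 0x0c00, 0x0400, 0x0800 or 0x0020.  A hedged
   scalar sketch of the same update; sketch_i387_cw_update is a
   hypothetical helper and its `mode' values stand in for the I387_CW_*
   enumerators.  */

static unsigned short
sketch_i387_cw_update (unsigned short cw, int mode)
{
  switch (mode)
    {
    case 0:				/* trunc: setting both bits works
					   without clearing the field first */
      return cw | 0x0c00;
    case 1:				/* floor: clear field, set bit 10 */
      return (cw & ~0x0c00) | 0x0400;
    case 2:				/* ceil: clear field, set bit 11 */
      return (cw & ~0x0c00) | 0x0800;
    default:				/* mask the precision exception */
      return cw | 0x0020;
    }
}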
15172 /* Output code for INSN to convert a float to a signed int. OPERANDS
15173 are the insn operands. The output may be [HSD]Imode and the input
15174 operand may be [SDX]Fmode. */
15177 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
15179 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15180 int dimode_p = GET_MODE (operands[0]) == DImode;
15181 int round_mode = get_attr_i387_cw (insn);
15183 /* Jump through a hoop or two for DImode, since the hardware has no
15184 non-popping instruction. We used to do this a different way, but
15185 that was somewhat fragile and broke with post-reload splitters. */
15186 if ((dimode_p || fisttp) && !stack_top_dies)
15187 output_asm_insn ("fld\t%y1", operands);
15189 gcc_assert (STACK_TOP_P (operands[1]));
15190 gcc_assert (MEM_P (operands[0]));
15191 gcc_assert (GET_MODE (operands[1]) != TFmode);
15194 output_asm_insn ("fisttp%Z0\t%0", operands);
15197 if (round_mode != I387_CW_ANY)
15198 output_asm_insn ("fldcw\t%3", operands);
15199 if (stack_top_dies || dimode_p)
15200 output_asm_insn ("fistp%Z0\t%0", operands);
15202 output_asm_insn ("fist%Z0\t%0", operands);
15203 if (round_mode != I387_CW_ANY)
15204 output_asm_insn ("fldcw\t%2", operands);
15210 /* Output code for x87 ffreep insn. The OPNO argument, which may only
15211 have the values zero or one, indicates the ffreep insn's operand
15212 from the OPERANDS array. */
15214 static const char *
15215 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
15217 if (TARGET_USE_FFREEP)
15218 #ifdef HAVE_AS_IX86_FFREEP
15219 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
15222 static char retval[32];
15223 int regno = REGNO (operands[opno]);
15225 gcc_assert (FP_REGNO_P (regno));
15227 regno -= FIRST_STACK_REG;
15229 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
15234 return opno ? "fstp\t%y1" : "fstp\t%y0";
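/* Editor's note, a hedged sketch (not GCC code): ffreep st(N) encodes as
   the two bytes 0xDF 0xC0+N, so on a little-endian target the ASM_SHORT
   immediate produced by the "0xc%ddf" format above -- 0xc0df through
   0xc7df -- lays down exactly those bytes.  sketch_ffreep_word is a
   hypothetical helper showing the value being emitted.  */

static unsigned short
sketch_ffreep_word (int n)		/* n in 0..7, i.e. st(n) */
{
  return (unsigned short) (0xc0df + (n << 8));	/* bytes DF, C0+n in memory */
}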
15238 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
15239 should be used. UNORDERED_P is true when fucom should be used. */
15242 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
15244 int stack_top_dies;
15245 rtx cmp_op0, cmp_op1;
15246 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
15250 cmp_op0 = operands[0];
15251 cmp_op1 = operands[1];
15255 cmp_op0 = operands[1];
15256 cmp_op1 = operands[2];
15261 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
15262 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
15263 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
15264 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
15266 if (GET_MODE (operands[0]) == SFmode)
15268 return &ucomiss[TARGET_AVX ? 0 : 1];
15270 return &comiss[TARGET_AVX ? 0 : 1];
15273 return &ucomisd[TARGET_AVX ? 0 : 1];
15275 return &comisd[TARGET_AVX ? 0 : 1];
15278 gcc_assert (STACK_TOP_P (cmp_op0));
15280 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15282 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
15284 if (stack_top_dies)
15286 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
15287 return output_387_ffreep (operands, 1);
15290 return "ftst\n\tfnstsw\t%0";
15293 if (STACK_REG_P (cmp_op1)
15295 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
15296 && REGNO (cmp_op1) != FIRST_STACK_REG)
15298 /* If the top of the 387 stack dies, and the other operand
15299 is also a stack register that dies, then this must be a
15300 `fcompp' float compare. */
15304 /* There is no double popping fcomi variant. Fortunately,
15305 eflags is immune from the fstp's cc clobbering. */
15307 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
15309 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
15310 return output_387_ffreep (operands, 0);
15315 return "fucompp\n\tfnstsw\t%0";
15317 return "fcompp\n\tfnstsw\t%0";
15322 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
15324 static const char * const alt[16] =
15326 "fcom%Z2\t%y2\n\tfnstsw\t%0",
15327 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
15328 "fucom%Z2\t%y2\n\tfnstsw\t%0",
15329 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
15331 "ficom%Z2\t%y2\n\tfnstsw\t%0",
15332 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
15336 "fcomi\t{%y1, %0|%0, %y1}",
15337 "fcomip\t{%y1, %0|%0, %y1}",
15338 "fucomi\t{%y1, %0|%0, %y1}",
15339 "fucomip\t{%y1, %0|%0, %y1}",
15350 mask = eflags_p << 3;
15351 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
15352 mask |= unordered_p << 1;
15353 mask |= stack_top_dies;
15355 gcc_assert (mask < 16);
15364 ix86_output_addr_vec_elt (FILE *file, int value)
15366 const char *directive = ASM_LONG;
15370 directive = ASM_QUAD;
15372 gcc_assert (!TARGET_64BIT);
15375 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
15379 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
15381 const char *directive = ASM_LONG;
15384 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
15385 directive = ASM_QUAD;
15387 gcc_assert (!TARGET_64BIT);
15389 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
15390 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
15391 fprintf (file, "%s%s%d-%s%d\n",
15392 directive, LPREFIX, value, LPREFIX, rel);
15393 else if (HAVE_AS_GOTOFF_IN_DATA)
15394 fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
15396 else if (TARGET_MACHO)
15398 fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
15399 machopic_output_function_base_name (file);
15404 asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
15405 GOT_SYMBOL_NAME, LPREFIX, value);
15408 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate for the target. */
15412 ix86_expand_clear (rtx dest)
15416 /* We play register width games, which are only valid after reload. */
15417 gcc_assert (reload_completed);
15419 /* Avoid HImode and its attendant prefix byte. */
15420 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
15421 dest = gen_rtx_REG (SImode, REGNO (dest));
15422 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
15424 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
15425 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
15427 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15428 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
15434 /* X is an unchanging MEM. If it is a constant pool reference, return
15435 the constant pool rtx, else NULL. */
15438 maybe_get_pool_constant (rtx x)
15440 x = ix86_delegitimize_address (XEXP (x, 0));
15442 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
15443 return get_pool_constant (x);
15449 ix86_expand_move (enum machine_mode mode, rtx operands[])
15452 enum tls_model model;
15457 if (GET_CODE (op1) == SYMBOL_REF)
15459 model = SYMBOL_REF_TLS_MODEL (op1);
15462 op1 = legitimize_tls_address (op1, model, true);
15463 op1 = force_operand (op1, op0);
15467 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15468 && SYMBOL_REF_DLLIMPORT_P (op1))
15469 op1 = legitimize_dllimport_symbol (op1, false);
15471 else if (GET_CODE (op1) == CONST
15472 && GET_CODE (XEXP (op1, 0)) == PLUS
15473 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
15475 rtx addend = XEXP (XEXP (op1, 0), 1);
15476 rtx symbol = XEXP (XEXP (op1, 0), 0);
15479 model = SYMBOL_REF_TLS_MODEL (symbol);
15481 tmp = legitimize_tls_address (symbol, model, true);
15482 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15483 && SYMBOL_REF_DLLIMPORT_P (symbol))
15484 tmp = legitimize_dllimport_symbol (symbol, true);
15488 tmp = force_operand (tmp, NULL);
15489 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
15490 op0, 1, OPTAB_DIRECT);
15496 if ((flag_pic || MACHOPIC_INDIRECT)
15497 && mode == Pmode && symbolic_operand (op1, Pmode))
15499 if (TARGET_MACHO && !TARGET_64BIT)
15502 /* dynamic-no-pic */
15503 if (MACHOPIC_INDIRECT)
15505 rtx temp = ((reload_in_progress
15506 || ((op0 && REG_P (op0))
15508 ? op0 : gen_reg_rtx (Pmode));
15509 op1 = machopic_indirect_data_reference (op1, temp);
15511 op1 = machopic_legitimize_pic_address (op1, mode,
15512 temp == op1 ? 0 : temp);
15514 if (op0 != op1 && GET_CODE (op0) != MEM)
15516 rtx insn = gen_rtx_SET (VOIDmode, op0, op1);
15520 if (GET_CODE (op0) == MEM)
15521 op1 = force_reg (Pmode, op1);
15525 if (GET_CODE (temp) != REG)
15526 temp = gen_reg_rtx (Pmode);
15527 temp = legitimize_pic_address (op1, temp);
15532 /* dynamic-no-pic */
15538 op1 = force_reg (Pmode, op1);
15539 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
15541 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
15542 op1 = legitimize_pic_address (op1, reg);
15551 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
15552 || !push_operand (op0, mode))
15554 op1 = force_reg (mode, op1);
15556 if (push_operand (op0, mode)
15557 && ! general_no_elim_operand (op1, mode))
15558 op1 = copy_to_mode_reg (mode, op1);
15560 /* Force large constants in 64bit compilation into a register
15561 to get them CSEed. */
15562 if (can_create_pseudo_p ()
15563 && (mode == DImode) && TARGET_64BIT
15564 && immediate_operand (op1, mode)
15565 && !x86_64_zext_immediate_operand (op1, VOIDmode)
15566 && !register_operand (op0, mode)
15568 op1 = copy_to_mode_reg (mode, op1);
15570 if (can_create_pseudo_p ()
15571 && FLOAT_MODE_P (mode)
15572 && GET_CODE (op1) == CONST_DOUBLE)
15574 /* If we are loading a floating point constant to a register,
15575 force the value to memory now, since we'll get better code
15576 out of the back end. */
15578 op1 = validize_mem (force_const_mem (mode, op1));
15579 if (!register_operand (op0, mode))
15581 rtx temp = gen_reg_rtx (mode);
15582 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
15583 emit_move_insn (op0, temp);
15589 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15593 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
15595 rtx op0 = operands[0], op1 = operands[1];
15596 unsigned int align = GET_MODE_ALIGNMENT (mode);
15598 /* Force constants other than zero into memory. We do not know how
15599 the instructions used to build constants modify the upper 64 bits
15600 of the register; once we have that information, we may be able
15601 to handle some of them more efficiently. */
15602 if (can_create_pseudo_p ()
15603 && register_operand (op0, mode)
15604 && (CONSTANT_P (op1)
15605 || (GET_CODE (op1) == SUBREG
15606 && CONSTANT_P (SUBREG_REG (op1))))
15607 && !standard_sse_constant_p (op1))
15608 op1 = validize_mem (force_const_mem (mode, op1));
15610 /* We need to check memory alignment for SSE modes since attributes
15611 can make operands unaligned. */
15612 if (can_create_pseudo_p ()
15613 && SSE_REG_MODE_P (mode)
15614 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
15615 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
15619 /* ix86_expand_vector_move_misalign() does not like constants ... */
15620 if (CONSTANT_P (op1)
15621 || (GET_CODE (op1) == SUBREG
15622 && CONSTANT_P (SUBREG_REG (op1))))
15623 op1 = validize_mem (force_const_mem (mode, op1));
15625 /* ... nor both arguments in memory. */
15626 if (!register_operand (op0, mode)
15627 && !register_operand (op1, mode))
15628 op1 = force_reg (mode, op1);
15630 tmp[0] = op0; tmp[1] = op1;
15631 ix86_expand_vector_move_misalign (mode, tmp);
15635 /* Make operand1 a register if it isn't already. */
15636 if (can_create_pseudo_p ()
15637 && !register_operand (op0, mode)
15638 && !register_operand (op1, mode))
15640 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
15644 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15647 /* Split 32-byte AVX unaligned load and store if needed. */
15650 ix86_avx256_split_vector_move_misalign (rtx op0, rtx op1)
15653 rtx (*extract) (rtx, rtx, rtx);
15654 rtx (*move_unaligned) (rtx, rtx);
15655 enum machine_mode mode;
15657 switch (GET_MODE (op0))
15660 gcc_unreachable ();
15662 extract = gen_avx_vextractf128v32qi;
15663 move_unaligned = gen_avx_movdqu256;
15667 extract = gen_avx_vextractf128v8sf;
15668 move_unaligned = gen_avx_movups256;
15672 extract = gen_avx_vextractf128v4df;
15673 move_unaligned = gen_avx_movupd256;
15678 if (MEM_P (op1) && TARGET_AVX256_SPLIT_UNALIGNED_LOAD)
15680 rtx r = gen_reg_rtx (mode);
15681 m = adjust_address (op1, mode, 0);
15682 emit_move_insn (r, m);
15683 m = adjust_address (op1, mode, 16);
15684 r = gen_rtx_VEC_CONCAT (GET_MODE (op0), r, m);
15685 emit_move_insn (op0, r);
15687 else if (MEM_P (op0) && TARGET_AVX256_SPLIT_UNALIGNED_STORE)
15689 m = adjust_address (op0, mode, 0);
15690 emit_insn (extract (m, op1, const0_rtx));
15691 m = adjust_address (op0, mode, 16);
15692 emit_insn (extract (m, op1, const1_rtx));
15695 emit_insn (move_unaligned (op0, op1));
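/* Editor's illustration (a sketch under stated assumptions, not GCC
   code): in intrinsics terms the split load above is roughly the
   following; the split store mirrors it with two vextractf128 halves.
   Assumes <immintrin.h> and compilation with -mavx.  */

#include <immintrin.h>

static __m256i
sketch_split_unaligned_load (const void *p)
{
  __m128i lo = _mm_loadu_si128 ((const __m128i *) p);
  __m128i hi = _mm_loadu_si128 ((const __m128i *) ((const char *) p + 16));
  return _mm256_insertf128_si256 (_mm256_castsi128_si256 (lo), hi, 1);
}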
15698 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
15699 straight to ix86_expand_vector_move. */
15700 /* Code generation for scalar reg-reg moves of single and double precision data:
15701 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
15705 if (x86_sse_partial_reg_dependency == true)
15710 Code generation for scalar loads of double precision data:
15711 if (x86_sse_split_regs == true)
15712 movlpd mem, reg (gas syntax)
15716 Code generation for unaligned packed loads of single precision data
15717 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
15718 if (x86_sse_unaligned_move_optimal)
15721 if (x86_sse_partial_reg_dependency == true)
15733 Code generation for unaligned packed loads of double precision data
15734 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
15735 if (x86_sse_unaligned_move_optimal)
15738 if (x86_sse_split_regs == true)
15751 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
15760 switch (GET_MODE_CLASS (mode))
15762 case MODE_VECTOR_INT:
15764 switch (GET_MODE_SIZE (mode))
15767 /* If we're optimizing for size, movups is the smallest. */
15768 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15770 op0 = gen_lowpart (V4SFmode, op0);
15771 op1 = gen_lowpart (V4SFmode, op1);
15772 emit_insn (gen_sse_movups (op0, op1));
15775 op0 = gen_lowpart (V16QImode, op0);
15776 op1 = gen_lowpart (V16QImode, op1);
15777 emit_insn (gen_sse2_movdqu (op0, op1));
15780 op0 = gen_lowpart (V32QImode, op0);
15781 op1 = gen_lowpart (V32QImode, op1);
15782 ix86_avx256_split_vector_move_misalign (op0, op1);
15785 gcc_unreachable ();
15788 case MODE_VECTOR_FLOAT:
15789 op0 = gen_lowpart (mode, op0);
15790 op1 = gen_lowpart (mode, op1);
15795 emit_insn (gen_sse_movups (op0, op1));
15798 ix86_avx256_split_vector_move_misalign (op0, op1);
15801 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15803 op0 = gen_lowpart (V4SFmode, op0);
15804 op1 = gen_lowpart (V4SFmode, op1);
15805 emit_insn (gen_sse_movups (op0, op1));
15808 emit_insn (gen_sse2_movupd (op0, op1));
15811 ix86_avx256_split_vector_move_misalign (op0, op1);
15814 gcc_unreachable ();
15819 gcc_unreachable ();
15827 /* If we're optimizing for size, movups is the smallest. */
15828 if (optimize_insn_for_size_p ()
15829 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15831 op0 = gen_lowpart (V4SFmode, op0);
15832 op1 = gen_lowpart (V4SFmode, op1);
15833 emit_insn (gen_sse_movups (op0, op1));
15837 /* ??? If we have typed data, then it would appear that using
15838 movdqu is the only way to get unaligned data loaded with integer registers. */
15840 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15842 op0 = gen_lowpart (V16QImode, op0);
15843 op1 = gen_lowpart (V16QImode, op1);
15844 emit_insn (gen_sse2_movdqu (op0, op1));
15848 if (TARGET_SSE2 && mode == V2DFmode)
15852 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15854 op0 = gen_lowpart (V2DFmode, op0);
15855 op1 = gen_lowpart (V2DFmode, op1);
15856 emit_insn (gen_sse2_movupd (op0, op1));
15860 /* When SSE registers are split into halves, we can avoid
15861 writing to the top half twice. */
15862 if (TARGET_SSE_SPLIT_REGS)
15864 emit_clobber (op0);
15869 /* ??? Not sure about the best option for the Intel chips.
15870 The following would seem to satisfy; the register is
15871 entirely cleared, breaking the dependency chain. We
15872 then store to the upper half, with a dependency depth
15873 of one. A rumor has it that Intel recommends two movsd
15874 followed by an unpacklpd, but this is unconfirmed. And
15875 given that the dependency depth of the unpacklpd would
15876 still be one, I'm not sure why this would be better. */
15877 zero = CONST0_RTX (V2DFmode);
15880 m = adjust_address (op1, DFmode, 0);
15881 emit_insn (gen_sse2_loadlpd (op0, zero, m));
15882 m = adjust_address (op1, DFmode, 8);
15883 emit_insn (gen_sse2_loadhpd (op0, op0, m));
15887 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15889 op0 = gen_lowpart (V4SFmode, op0);
15890 op1 = gen_lowpart (V4SFmode, op1);
15891 emit_insn (gen_sse_movups (op0, op1));
15895 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
15896 emit_move_insn (op0, CONST0_RTX (mode));
15898 emit_clobber (op0);
15900 if (mode != V4SFmode)
15901 op0 = gen_lowpart (V4SFmode, op0);
15902 m = adjust_address (op1, V2SFmode, 0);
15903 emit_insn (gen_sse_loadlps (op0, op0, m));
15904 m = adjust_address (op1, V2SFmode, 8);
15905 emit_insn (gen_sse_loadhps (op0, op0, m));
15908 else if (MEM_P (op0))
15910 /* If we're optimizing for size, movups is the smallest. */
15911 if (optimize_insn_for_size_p ()
15912 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15914 op0 = gen_lowpart (V4SFmode, op0);
15915 op1 = gen_lowpart (V4SFmode, op1);
15916 emit_insn (gen_sse_movups (op0, op1));
15920 /* ??? Similar to above, only less clear because of the quoted
15921 "typeless stores". */
15922 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
15923 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15925 op0 = gen_lowpart (V16QImode, op0);
15926 op1 = gen_lowpart (V16QImode, op1);
15927 emit_insn (gen_sse2_movdqu (op0, op1));
15931 if (TARGET_SSE2 && mode == V2DFmode)
15933 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15935 op0 = gen_lowpart (V2DFmode, op0);
15936 op1 = gen_lowpart (V2DFmode, op1);
15937 emit_insn (gen_sse2_movupd (op0, op1));
15941 m = adjust_address (op0, DFmode, 0);
15942 emit_insn (gen_sse2_storelpd (m, op1));
15943 m = adjust_address (op0, DFmode, 8);
15944 emit_insn (gen_sse2_storehpd (m, op1));
15949 if (mode != V4SFmode)
15950 op1 = gen_lowpart (V4SFmode, op1);
15952 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15954 op0 = gen_lowpart (V4SFmode, op0);
15955 emit_insn (gen_sse_movups (op0, op1));
15959 m = adjust_address (op0, V2SFmode, 0);
15960 emit_insn (gen_sse_storelps (m, op1));
15961 m = adjust_address (op0, V2SFmode, 8);
15962 emit_insn (gen_sse_storehps (m, op1));
15967 gcc_unreachable ();
15970 /* Expand a push in MODE. This is some mode for which we do not support
15971 proper push instructions, at least from the registers that we expect
15972 the value to live in. */
15975 ix86_expand_push (enum machine_mode mode, rtx x)
15979 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
15980 GEN_INT (-GET_MODE_SIZE (mode)),
15981 stack_pointer_rtx, 1, OPTAB_DIRECT);
15982 if (tmp != stack_pointer_rtx)
15983 emit_move_insn (stack_pointer_rtx, tmp);
15985 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
15987 /* When we push an operand onto the stack, it has to be aligned at least
15988 at the function argument boundary. However, since we don't have
15989 the argument type, we can't determine the actual argument boundary. */
15991 emit_move_insn (tmp, x);
15994 /* Helper function of ix86_fixup_binary_operands to canonicalize
15995 operand order. Returns true if the operands should be swapped. */
15998 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
16001 rtx dst = operands[0];
16002 rtx src1 = operands[1];
16003 rtx src2 = operands[2];
16005 /* If the operation is not commutative, we can't do anything. */
16006 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
16009 /* Highest priority is that src1 should match dst. */
16010 if (rtx_equal_p (dst, src1))
16012 if (rtx_equal_p (dst, src2))
16015 /* Next highest priority is that immediate constants come second. */
16016 if (immediate_operand (src2, mode))
16018 if (immediate_operand (src1, mode))
16021 /* Lowest priority is that memory references should come second. */
16031 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
16032 destination to use for the operation. If different from the true
16033 destination in operands[0], a copy operation will be required. */
16036 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
16039 rtx dst = operands[0];
16040 rtx src1 = operands[1];
16041 rtx src2 = operands[2];
16043 /* Canonicalize operand order. */
16044 if (ix86_swap_binary_operands_p (code, mode, operands))
16048 /* It is invalid to swap operands of different modes. */
16049 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
16056 /* Both source operands cannot be in memory. */
16057 if (MEM_P (src1) && MEM_P (src2))
16059 /* Optimization: Only read from memory once. */
16060 if (rtx_equal_p (src1, src2))
16062 src2 = force_reg (mode, src2);
16066 src2 = force_reg (mode, src2);
16069 /* If the destination is memory, and we do not have matching source
16070 operands, do things in registers. */
16071 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
16072 dst = gen_reg_rtx (mode);
16074 /* Source 1 cannot be a constant. */
16075 if (CONSTANT_P (src1))
16076 src1 = force_reg (mode, src1);
16078 /* Source 1 cannot be a non-matching memory. */
16079 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
16080 src1 = force_reg (mode, src1);
16082 operands[1] = src1;
16083 operands[2] = src2;
16087 /* Similarly, but assume that the destination has already been
16088 set up properly. */
16091 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
16092 enum machine_mode mode, rtx operands[])
16094 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
16095 gcc_assert (dst == operands[0]);
16098 /* Attempt to expand a binary operator. Make the expansion closer to the
16099 actual machine than just general_operand, which would allow 3 separate
16100 memory references (one output, two input) in a single insn. */
16103 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
16106 rtx src1, src2, dst, op, clob;
16108 dst = ix86_fixup_binary_operands (code, mode, operands);
16109 src1 = operands[1];
16110 src2 = operands[2];
16112 /* Emit the instruction. */
16114 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
16115 if (reload_in_progress)
16117 /* Reload doesn't know about the flags register, and doesn't know that
16118 it doesn't want to clobber it. We can only do this with PLUS. */
16119 gcc_assert (code == PLUS);
16122 else if (reload_completed
16124 && !rtx_equal_p (dst, src1))
16126 /* This is going to be an LEA; avoid splitting it later. */
16131 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16132 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
16135 /* Fix up the destination if needed. */
16136 if (dst != operands[0])
16137 emit_move_insn (operands[0], dst);
16140 /* Return TRUE or FALSE depending on whether the binary operator meets the
16141 appropriate constraints. */
16144 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
16147 rtx dst = operands[0];
16148 rtx src1 = operands[1];
16149 rtx src2 = operands[2];
16151 /* Both source operands cannot be in memory. */
16152 if (MEM_P (src1) && MEM_P (src2))
16155 /* Canonicalize operand order for commutative operators. */
16156 if (ix86_swap_binary_operands_p (code, mode, operands))
16163 /* If the destination is memory, we must have a matching source operand. */
16164 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
16167 /* Source 1 cannot be a constant. */
16168 if (CONSTANT_P (src1))
16171 /* Source 1 cannot be a non-matching memory. */
16172 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
16174 /* Support "andhi/andsi/anddi" as a zero-extending move. */
16175 return (code == AND
16178 || (TARGET_64BIT && mode == DImode))
16179 && CONST_INT_P (src2)
16180 && (INTVAL (src2) == 0xff
16181 || INTVAL (src2) == 0xffff));
16187 /* Attempt to expand a unary operator. Make the expansion closer to the
16188 actual machine than just general_operand, which would allow 2 separate
16189 memory references (one output, one input) in a single insn. */
16192 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
16195 int matching_memory;
16196 rtx src, dst, op, clob;
16201 /* If the destination is memory, and we do not have matching source
16202 operands, do things in registers. */
16203 matching_memory = 0;
16206 if (rtx_equal_p (dst, src))
16207 matching_memory = 1;
16209 dst = gen_reg_rtx (mode);
16212 /* When source operand is memory, destination must match. */
16213 if (MEM_P (src) && !matching_memory)
16214 src = force_reg (mode, src);
16216 /* Emit the instruction. */
16218 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
16219 if (reload_in_progress || code == NOT)
16221 /* Reload doesn't know about the flags register, and doesn't know that
16222 it doesn't want to clobber it. */
16223 gcc_assert (code == NOT);
16228 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16229 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
16232 /* Fix up the destination if needed. */
16233 if (dst != operands[0])
16234 emit_move_insn (operands[0], dst);
16237 /* Split 32bit/64bit divmod with 8bit unsigned divmod if dividend and
16238 divisor are within the range [0-255]. */
16241 ix86_split_idivmod (enum machine_mode mode, rtx operands[],
16244 rtx end_label, qimode_label;
16245 rtx insn, div, mod;
16246 rtx scratch, tmp0, tmp1, tmp2;
16247 rtx (*gen_divmod4_1) (rtx, rtx, rtx, rtx);
16248 rtx (*gen_zero_extend) (rtx, rtx);
16249 rtx (*gen_test_ccno_1) (rtx, rtx);
16254 gen_divmod4_1 = signed_p ? gen_divmodsi4_1 : gen_udivmodsi4_1;
16255 gen_test_ccno_1 = gen_testsi_ccno_1;
16256 gen_zero_extend = gen_zero_extendqisi2;
16259 gen_divmod4_1 = signed_p ? gen_divmoddi4_1 : gen_udivmoddi4_1;
16260 gen_test_ccno_1 = gen_testdi_ccno_1;
16261 gen_zero_extend = gen_zero_extendqidi2;
16264 gcc_unreachable ();
16267 end_label = gen_label_rtx ();
16268 qimode_label = gen_label_rtx ();
16270 scratch = gen_reg_rtx (mode);
16272 /* Use 8bit unsigned divmod if dividend and divisor are within
16273 the range [0-255]. */
16274 emit_move_insn (scratch, operands[2]);
16275 scratch = expand_simple_binop (mode, IOR, scratch, operands[3],
16276 scratch, 1, OPTAB_DIRECT);
16277 emit_insn (gen_test_ccno_1 (scratch, GEN_INT (-0x100)));
16278 tmp0 = gen_rtx_REG (CCNOmode, FLAGS_REG);
16279 tmp0 = gen_rtx_EQ (VOIDmode, tmp0, const0_rtx);
16280 tmp0 = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp0,
16281 gen_rtx_LABEL_REF (VOIDmode, qimode_label),
16283 insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp0));
16284 predict_jump (REG_BR_PROB_BASE * 50 / 100);
16285 JUMP_LABEL (insn) = qimode_label;
16287 /* Generate the original signed/unsigned divmod. */
16288 div = gen_divmod4_1 (operands[0], operands[1],
16289 operands[2], operands[3]);
16292 /* Branch to the end. */
16293 emit_jump_insn (gen_jump (end_label));
16296 /* Generate 8bit unsigned divide. */
16297 emit_label (qimode_label);
16298 /* Don't use operands[0] for result of 8bit divide since not all
16299 registers support QImode ZERO_EXTRACT. */
16300 tmp0 = simplify_gen_subreg (HImode, scratch, mode, 0);
16301 tmp1 = simplify_gen_subreg (HImode, operands[2], mode, 0);
16302 tmp2 = simplify_gen_subreg (QImode, operands[3], mode, 0);
16303 emit_insn (gen_udivmodhiqi3 (tmp0, tmp1, tmp2));
16307 div = gen_rtx_DIV (SImode, operands[2], operands[3]);
16308 mod = gen_rtx_MOD (SImode, operands[2], operands[3]);
16312 div = gen_rtx_UDIV (SImode, operands[2], operands[3]);
16313 mod = gen_rtx_UMOD (SImode, operands[2], operands[3]);
16316 /* Extract remainder from AH. */
16317 tmp1 = gen_rtx_ZERO_EXTRACT (mode, tmp0, GEN_INT (8), GEN_INT (8));
16318 if (REG_P (operands[1]))
16319 insn = emit_move_insn (operands[1], tmp1);
16322 /* Need a new scratch register since the old one holds the result of the 8bit divide. */
16324 scratch = gen_reg_rtx (mode);
16325 emit_move_insn (scratch, tmp1);
16326 insn = emit_move_insn (operands[1], scratch);
16328 set_unique_reg_note (insn, REG_EQUAL, mod);
16330 /* Zero extend quotient from AL. */
16331 tmp1 = gen_lowpart (QImode, tmp0);
16332 insn = emit_insn (gen_zero_extend (operands[0], tmp1));
16333 set_unique_reg_note (insn, REG_EQUAL, div);
16335 emit_label (end_label);
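/* Editor's sketch of the range test emitted above: both operands fit the
   8bit unsigned divider exactly when no bit above the low eight is set in
   either, i.e. when (a | b) & -0x100 is zero -- the single `test' with
   mask -0x100 on the IOR checks both at once.  sketch_fits_8bit_udivmod
   is a hypothetical scalar analogue, not GCC code.  */

static int
sketch_fits_8bit_udivmod (unsigned int a, unsigned int b)
{
  return ((a | b) & ~0xffU) == 0;	/* same as test $-0x100 on a|b */
}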
16338 #define LEA_SEARCH_THRESHOLD 12
16340 /* Search backward for non-agu definition of register number REGNO1
16341 or register number REGNO2 in INSN's basic block until
16342 1. Pass LEA_SEARCH_THRESHOLD instructions, or
16343 2. Reach BB boundary, or
16344 3. Reach agu definition.
16345 Returns the distance between the non-agu definition point and INSN.
16346 If no definition point, returns -1. */
16349 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
16352 basic_block bb = BLOCK_FOR_INSN (insn);
16355 enum attr_type insn_type;
16357 if (insn != BB_HEAD (bb))
16359 rtx prev = PREV_INSN (insn);
16360 while (prev && distance < LEA_SEARCH_THRESHOLD)
16362 if (NONDEBUG_INSN_P (prev))
16365 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
16366 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16367 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16368 && (regno1 == DF_REF_REGNO (*def_rec)
16369 || regno2 == DF_REF_REGNO (*def_rec)))
16371 insn_type = get_attr_type (prev);
16372 if (insn_type != TYPE_LEA)
16376 if (prev == BB_HEAD (bb))
16378 prev = PREV_INSN (prev);
16382 if (distance < LEA_SEARCH_THRESHOLD)
16386 bool simple_loop = false;
16388 FOR_EACH_EDGE (e, ei, bb->preds)
16391 simple_loop = true;
16397 rtx prev = BB_END (bb);
16400 && distance < LEA_SEARCH_THRESHOLD)
16402 if (NONDEBUG_INSN_P (prev))
16405 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
16406 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16407 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16408 && (regno1 == DF_REF_REGNO (*def_rec)
16409 || regno2 == DF_REF_REGNO (*def_rec)))
16411 insn_type = get_attr_type (prev);
16412 if (insn_type != TYPE_LEA)
16416 prev = PREV_INSN (prev);
16424 /* get_attr_type may modify recog data. We want to make sure
16425 that recog data is valid for instruction INSN, on which
16426 distance_non_agu_define is called. INSN is unchanged here. */
16427 extract_insn_cached (insn);
16431 /* Return the distance between INSN and the next insn that uses
16432 register number REGNO0 in a memory address. Return -1 if no such
16433 use is found within LEA_SEARCH_THRESHOLD or if REGNO0 is set. */
16436 distance_agu_use (unsigned int regno0, rtx insn)
16438 basic_block bb = BLOCK_FOR_INSN (insn);
16443 if (insn != BB_END (bb))
16445 rtx next = NEXT_INSN (insn);
16446 while (next && distance < LEA_SEARCH_THRESHOLD)
16448 if (NONDEBUG_INSN_P (next))
16452 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16453 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16454 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16455 && regno0 == DF_REF_REGNO (*use_rec))
16457 /* Return DISTANCE if OP0 is used in a memory
16458 address in NEXT. */
16462 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16463 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16464 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16465 && regno0 == DF_REF_REGNO (*def_rec))
16467 /* Return -1 if OP0 is set in NEXT. */
16471 if (next == BB_END (bb))
16473 next = NEXT_INSN (next);
16477 if (distance < LEA_SEARCH_THRESHOLD)
16481 bool simple_loop = false;
16483 FOR_EACH_EDGE (e, ei, bb->succs)
16486 simple_loop = true;
16492 rtx next = BB_HEAD (bb);
16495 && distance < LEA_SEARCH_THRESHOLD)
16497 if (NONDEBUG_INSN_P (next))
16501 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16502 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16503 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16504 && regno0 == DF_REF_REGNO (*use_rec))
16506 /* Return DISTANCE if OP0 is used in a memory
16507 address in NEXT. */
16511 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16512 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16513 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16514 && regno0 == DF_REF_REGNO (*def_rec))
16516 /* Return -1 if OP0 is set in NEXT. */
16521 next = NEXT_INSN (next);
16529 /* Define this macro to tune LEA priority vs ADD; it takes effect when
16530 there is a dilemma of choosing between LEA and ADD.
16531 Negative value: ADD is preferred over LEA.
16533 Positive value: LEA is preferred over ADD. */
16534 #define IX86_LEA_PRIORITY 2
16536 /* Return true if it is ok to optimize an ADD operation to an LEA
16537 operation to avoid flag register consumption. For most processors,
16538 ADD is faster than LEA. For processors like ATOM, if the
16539 destination register of the LEA holds an actual address which will be
16540 used soon, LEA is better; otherwise ADD is better. */
16543 ix86_lea_for_add_ok (rtx insn, rtx operands[])
16545 unsigned int regno0 = true_regnum (operands[0]);
16546 unsigned int regno1 = true_regnum (operands[1]);
16547 unsigned int regno2 = true_regnum (operands[2]);
16549 /* If a = b + c with a != b and a != c, we must use the LEA form. */
16550 if (regno0 != regno1 && regno0 != regno2)
16553 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
16557 int dist_define, dist_use;
16559 /* Return false if REGNO0 isn't used in a memory address. */
16560 dist_use = distance_agu_use (regno0, insn);
16564 dist_define = distance_non_agu_define (regno1, regno2, insn);
16565 if (dist_define <= 0)
16568 /* If this insn has both a backward non-agu dependence and a forward
16569 agu dependence, the one with the shorter distance takes effect. */
16570 if ((dist_define + IX86_LEA_PRIORITY) < dist_use)
16577 /* Return true if the destination reg of SET_BODY is the shift count of USE_BODY. */
16581 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
16587 /* Retrieve destination of SET_BODY. */
16588 switch (GET_CODE (set_body))
16591 set_dest = SET_DEST (set_body);
16592 if (!set_dest || !REG_P (set_dest))
16596 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
16597 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
16605 /* Retrieve shift count of USE_BODY. */
16606 switch (GET_CODE (use_body))
16609 shift_rtx = XEXP (use_body, 1);
16612 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
16613 if (ix86_dep_by_shift_count_body (set_body,
16614 XVECEXP (use_body, 0, i)))
16622 && (GET_CODE (shift_rtx) == ASHIFT
16623 || GET_CODE (shift_rtx) == LSHIFTRT
16624 || GET_CODE (shift_rtx) == ASHIFTRT
16625 || GET_CODE (shift_rtx) == ROTATE
16626 || GET_CODE (shift_rtx) == ROTATERT))
16628 rtx shift_count = XEXP (shift_rtx, 1);
16630 /* Return true if shift count is dest of SET_BODY. */
16631 if (REG_P (shift_count)
16632 && true_regnum (set_dest) == true_regnum (shift_count))
16639 /* Return true if the destination reg of SET_INSN is the shift count of USE_INSN. */
16643 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
16645 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
16646 PATTERN (use_insn));
16649 /* Return TRUE or FALSE depending on whether the unary operator meets the
16650 appropriate constraints. */
16653 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
16654 enum machine_mode mode ATTRIBUTE_UNUSED,
16655 rtx operands[2] ATTRIBUTE_UNUSED)
16657 /* If one of the operands is memory, source and destination must match. */
16658 if ((MEM_P (operands[0])
16659 || MEM_P (operands[1]))
16660 && ! rtx_equal_p (operands[0], operands[1]))
16665 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
16666 are ok, keeping in mind the possible movddup alternative. */
16669 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
16671 if (MEM_P (operands[0]))
16672 return rtx_equal_p (operands[0], operands[1 + high]);
16673 if (MEM_P (operands[1]) && MEM_P (operands[2]))
16674 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
16678 /* Post-reload splitter for converting an SF or DFmode value in an
16679 SSE register into an unsigned SImode. */
16682 ix86_split_convert_uns_si_sse (rtx operands[])
16684 enum machine_mode vecmode;
16685 rtx value, large, zero_or_two31, input, two31, x;
16687 large = operands[1];
16688 zero_or_two31 = operands[2];
16689 input = operands[3];
16690 two31 = operands[4];
16691 vecmode = GET_MODE (large);
16692 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
16694 /* Load up the value into the low element. We must ensure that the other
16695 elements are valid floats -- zero is the easiest such value. */
16698 if (vecmode == V4SFmode)
16699 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
16701 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
16705 input = gen_rtx_REG (vecmode, REGNO (input));
16706 emit_move_insn (value, CONST0_RTX (vecmode));
16707 if (vecmode == V4SFmode)
16708 emit_insn (gen_sse_movss (value, value, input));
16710 emit_insn (gen_sse2_movsd (value, value, input));
16713 emit_move_insn (large, two31);
16714 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
16716 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
16717 emit_insn (gen_rtx_SET (VOIDmode, large, x));
16719 x = gen_rtx_AND (vecmode, zero_or_two31, large);
16720 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
16722 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
16723 emit_insn (gen_rtx_SET (VOIDmode, value, x));
16725 large = gen_rtx_REG (V4SImode, REGNO (large));
16726 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
16728 x = gen_rtx_REG (V4SImode, REGNO (value));
16729 if (vecmode == V4SFmode)
16730 emit_insn (gen_sse2_cvttps2dq (x, value));
16732 emit_insn (gen_sse2_cvttpd2dq (x, value));
16735 emit_insn (gen_xorv4si3 (value, value, large));
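/* Editor's scalar sketch of the branchless sequence above: values with
   the 2**31 bit set are pulled back into signed range before the cvtt,
   and the bit is XORed back into the integer result afterwards.  A
   hedged, branchy equivalent for a single value (sketch_convert_uns_si
   is a hypothetical helper, not GCC code):  */

static unsigned int
sketch_convert_uns_si (double x)	/* assumes 0 <= x < 2**32 */
{
  if (x >= 2147483648.0)		/* the `large' mask, elementwise */
    return ((unsigned int) (int) (x - 2147483648.0)) ^ 0x80000000u;
  return (unsigned int) (int) x;
}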
16738 /* Convert an unsigned DImode value into a DFmode, using only SSE.
16739 Expects the 64-bit DImode to be supplied in a pair of integral
16740 registers. Requires SSE2; will use SSE3 if available. For x86_32,
16741 -mfpmath=sse, !optimize_size only. */
16744 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
16746 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
16747 rtx int_xmm, fp_xmm;
16748 rtx biases, exponents;
16751 int_xmm = gen_reg_rtx (V4SImode);
16752 if (TARGET_INTER_UNIT_MOVES)
16753 emit_insn (gen_movdi_to_sse (int_xmm, input));
16754 else if (TARGET_SSE_SPLIT_REGS)
16756 emit_clobber (int_xmm);
16757 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
16761 x = gen_reg_rtx (V2DImode);
16762 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
16763 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
16766 x = gen_rtx_CONST_VECTOR (V4SImode,
16767 gen_rtvec (4, GEN_INT (0x43300000UL),
16768 GEN_INT (0x45300000UL),
16769 const0_rtx, const0_rtx));
16770 exponents = validize_mem (force_const_mem (V4SImode, x));
16772 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
16773 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
16775 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_lo_xmm)
16776 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
16777 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
16778 (0x1.0p84 + double(fp_value_hi_xmm)).
16779 Note these exponents differ by 32. */
16781 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
16783 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
16784 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
16785 real_ldexp (&bias_lo_rvt, &dconst1, 52);
16786 real_ldexp (&bias_hi_rvt, &dconst1, 84);
16787 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
16788 x = const_double_from_real_value (bias_hi_rvt, DFmode);
16789 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
16790 biases = validize_mem (force_const_mem (V2DFmode, biases));
16791 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
16793 /* Add the upper and lower DFmode values together. */
16795 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
16798 x = copy_to_mode_reg (V2DFmode, fp_xmm);
16799 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
16800 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
16803 ix86_expand_vector_extract (false, target, fp_xmm, 0);
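/* Editor's sketch of the bias trick described above, with a union in
   place of the vector interleave.  Not GCC code; type punning through a
   union is a documented GCC extension, and the hex-float literals are
   C99.  sketch_convert_uns_didf is a hypothetical name.  */

static double
sketch_convert_uns_didf (unsigned long long v)
{
  union { unsigned long long i; double d; } lo, hi;

  lo.i = (0x43300000ULL << 32) | (v & 0xffffffffu);	/* 2**52 + lo32 */
  hi.i = (0x45300000ULL << 32) | (v >> 32);		/* 2**84 + 2**32*hi32 */
  /* Both subtractions are exact; only the final add rounds.  */
  return (lo.d - 0x1.0p52) + (hi.d - 0x1.0p84);
}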
16806 /* Not used, but eases macroization of patterns. */
16808 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
16809 rtx input ATTRIBUTE_UNUSED)
16811 gcc_unreachable ();
16814 /* Convert an unsigned SImode value into a DFmode. Only currently used
16815 for SSE, but applicable anywhere. */
16818 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
16820 REAL_VALUE_TYPE TWO31r;
16823 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
16824 NULL, 1, OPTAB_DIRECT);
16826 fp = gen_reg_rtx (DFmode);
16827 emit_insn (gen_floatsidf2 (fp, x));
16829 real_ldexp (&TWO31r, &dconst1, 31);
16830 x = const_double_from_real_value (TWO31r, DFmode);
16832 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
16834 emit_move_insn (target, x);
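/* Editor's sketch (not GCC code): the expansion above computes
   (int) (x - 2**31) with a plain integer add, converts that, and adds
   2**31.0 back, which is exact in double.  The unsigned-to-int cast
   relies on GCC's documented modular conversion.  */

static double
sketch_convert_uns_sidf (unsigned int x)
{
  return (double) (int) (x - 0x80000000u) + 2147483648.0;
}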
16837 /* Convert a signed DImode value into a DFmode. Only used for SSE in
16838 32-bit mode; otherwise we have a direct convert instruction. */
16841 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
16843 REAL_VALUE_TYPE TWO32r;
16844 rtx fp_lo, fp_hi, x;
16846 fp_lo = gen_reg_rtx (DFmode);
16847 fp_hi = gen_reg_rtx (DFmode);
16849 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
16851 real_ldexp (&TWO32r, &dconst1, 32);
16852 x = const_double_from_real_value (TWO32r, DFmode);
16853 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
16855 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
16857 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
16860 emit_move_insn (target, x);
16863 /* Convert an unsigned SImode value into an SFmode, using only SSE.
16864 For x86_32, -mfpmath=sse, !optimize_size only. */
16866 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
16868 REAL_VALUE_TYPE ONE16r;
16869 rtx fp_hi, fp_lo, int_hi, int_lo, x;
16871 real_ldexp (&ONE16r, &dconst1, 16);
16872 x = const_double_from_real_value (ONE16r, SFmode);
16873 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
16874 NULL, 0, OPTAB_DIRECT);
16875 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
16876 NULL, 0, OPTAB_DIRECT);
16877 fp_hi = gen_reg_rtx (SFmode);
16878 fp_lo = gen_reg_rtx (SFmode);
16879 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
16880 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
16881 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
16883 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
16885 if (!rtx_equal_p (target, fp_hi))
16886 emit_move_insn (target, fp_hi);
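/* Editor's sketch (a hypothetical helper, not GCC code): each 16bit half
   converts to SFmode exactly and the multiply by 2**16 is exact, so the
   only rounding happens in the final add, matching the expansion above.  */

static float
sketch_convert_uns_sisf (unsigned int x)
{
  return (float) (int) (x >> 16) * 65536.0f + (float) (int) (x & 0xffff);
}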
16889 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
16890 then replicate the value for all elements of the vector register. */
16894 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
16901 v = gen_rtvec (4, value, value, value, value);
16902 return gen_rtx_CONST_VECTOR (V4SImode, v);
16906 v = gen_rtvec (2, value, value);
16907 return gen_rtx_CONST_VECTOR (V2DImode, v);
16911 v = gen_rtvec (8, value, value, value, value,
16912 value, value, value, value);
16914 v = gen_rtvec (8, value, CONST0_RTX (SFmode),
16915 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16916 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16917 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16918 return gen_rtx_CONST_VECTOR (V8SFmode, v);
16922 v = gen_rtvec (4, value, value, value, value);
16924 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
16925 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16926 return gen_rtx_CONST_VECTOR (V4SFmode, v);
16930 v = gen_rtvec (4, value, value, value, value);
16932 v = gen_rtvec (4, value, CONST0_RTX (DFmode),
16933 CONST0_RTX (DFmode), CONST0_RTX (DFmode));
16934 return gen_rtx_CONST_VECTOR (V4DFmode, v);
16938 v = gen_rtvec (2, value, value);
16940 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
16941 return gen_rtx_CONST_VECTOR (V2DFmode, v);
16944 gcc_unreachable ();
16948 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
16949 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
16950 for an SSE register. If VECT is true, then replicate the mask for
16951 all elements of the vector register. If INVERT is true, then create
16952 a mask excluding the sign bit. */
16955 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
16957 enum machine_mode vec_mode, imode;
16958 HOST_WIDE_INT hi, lo;
16963 /* Find the sign bit, sign extended to 2*HWI. */
16970 mode = GET_MODE_INNER (mode);
16972 lo = 0x80000000, hi = lo < 0;
16979 mode = GET_MODE_INNER (mode);
16981 if (HOST_BITS_PER_WIDE_INT >= 64)
16982 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
16984 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
16989 vec_mode = VOIDmode;
16990 if (HOST_BITS_PER_WIDE_INT >= 64)
16993 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
17000 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
17004 lo = ~lo, hi = ~hi;
17010 mask = immed_double_const (lo, hi, imode);
17012 vec = gen_rtvec (2, v, mask);
17013 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
17014 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
17021 gcc_unreachable ();
17025 lo = ~lo, hi = ~hi;
17027 /* Force this value into the low part of a fp vector constant. */
17028 mask = immed_double_const (lo, hi, imode);
17029 mask = gen_lowpart (mode, mask);
17031 if (vec_mode == VOIDmode)
17032 return force_reg (mode, mask);
17034 v = ix86_build_const_vector (vec_mode, vect, mask);
17035 return force_reg (vec_mode, v);
17038 /* Generate code for floating point ABS or NEG. */
17041 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
17044 rtx mask, set, dst, src;
17045 bool use_sse = false;
17046 bool vector_mode = VECTOR_MODE_P (mode);
17047 enum machine_mode vmode = mode;
17051 else if (mode == TFmode)
17053 else if (TARGET_SSE_MATH)
17055 use_sse = SSE_FLOAT_MODE_P (mode);
17056 if (mode == SFmode)
17058 else if (mode == DFmode)
17062 /* NEG and ABS performed with SSE use bitwise mask operations.
17063 Create the appropriate mask now. */
17065 mask = ix86_build_signbit_mask (vmode, vector_mode, code == ABS);
17072 set = gen_rtx_fmt_e (code, mode, src);
17073 set = gen_rtx_SET (VOIDmode, dst, set);
17080 use = gen_rtx_USE (VOIDmode, mask);
17082 par = gen_rtvec (2, set, use);
17085 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
17086 par = gen_rtvec (3, set, use, clob);
17088 emit_insn (gen_rtx_PARALLEL (VOIDmode, par));
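/* Editor's illustration of the mask use above: NEG is an XOR with the
   sign-bit mask and ABS is an AND with its inverse.  A hedged scalar
   version (sketch_fp_absneg is hypothetical; union punning is a
   documented GCC extension):  */

static float
sketch_fp_absneg (float x, int is_abs)
{
  union { float f; unsigned int i; } u;

  u.f = x;
  if (is_abs)
    u.i &= ~0x80000000u;		/* ABS: clear the sign bit */
  else
    u.i ^= 0x80000000u;			/* NEG: flip the sign bit */
  return u.f;
}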
17094 /* Expand a copysign operation. Special case operand 0 being a constant. */
17097 ix86_expand_copysign (rtx operands[])
17099 enum machine_mode mode, vmode;
17100 rtx dest, op0, op1, mask, nmask;
17102 dest = operands[0];
17106 mode = GET_MODE (dest);
17108 if (mode == SFmode)
17110 else if (mode == DFmode)
17115 if (GET_CODE (op0) == CONST_DOUBLE)
17117 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
17119 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
17120 op0 = simplify_unary_operation (ABS, mode, op0, mode);
17122 if (mode == SFmode || mode == DFmode)
17124 if (op0 == CONST0_RTX (mode))
17125 op0 = CONST0_RTX (vmode);
17128 rtx v = ix86_build_const_vector (vmode, false, op0);
17130 op0 = force_reg (vmode, v);
17133 else if (op0 != CONST0_RTX (mode))
17134 op0 = force_reg (mode, op0);
17136 mask = ix86_build_signbit_mask (vmode, 0, 0);
17138 if (mode == SFmode)
17139 copysign_insn = gen_copysignsf3_const;
17140 else if (mode == DFmode)
17141 copysign_insn = gen_copysigndf3_const;
17143 copysign_insn = gen_copysigntf3_const;
17145 emit_insn (copysign_insn (dest, op0, op1, mask));
17149 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
17151 nmask = ix86_build_signbit_mask (vmode, 0, 1);
17152 mask = ix86_build_signbit_mask (vmode, 0, 0);
17154 if (mode == SFmode)
17155 copysign_insn = gen_copysignsf3_var;
17156 else if (mode == DFmode)
17157 copysign_insn = gen_copysigndf3_var;
17159 copysign_insn = gen_copysigntf3_var;
17161 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
17165 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
17166 be a constant, and so has already been expanded into a vector constant. */
17169 ix86_split_copysign_const (rtx operands[])
17171 enum machine_mode mode, vmode;
17172 rtx dest, op0, mask, x;
17174 dest = operands[0];
17176 mask = operands[3];
17178 mode = GET_MODE (dest);
17179 vmode = GET_MODE (mask);
17181 dest = simplify_gen_subreg (vmode, dest, mode, 0);
17182 x = gen_rtx_AND (vmode, dest, mask);
17183 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17185 if (op0 != CONST0_RTX (vmode))
17187 x = gen_rtx_IOR (vmode, dest, op0);
17188 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17192 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
17193 so we have to do two masks. */
17196 ix86_split_copysign_var (rtx operands[])
17198 enum machine_mode mode, vmode;
17199 rtx dest, scratch, op0, op1, mask, nmask, x;
17201 dest = operands[0];
17202 scratch = operands[1];
17205 nmask = operands[4];
17206 mask = operands[5];
17208 mode = GET_MODE (dest);
17209 vmode = GET_MODE (mask);
17211 if (rtx_equal_p (op0, op1))
17213 /* Shouldn't happen often (it's useless, obviously), but when it does
17214 we'd generate incorrect code if we continue below. */
17215 emit_move_insn (dest, op0);
17219 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
17221 gcc_assert (REGNO (op1) == REGNO (scratch));
17223 x = gen_rtx_AND (vmode, scratch, mask);
17224 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
17227 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
17228 x = gen_rtx_NOT (vmode, dest);
17229 x = gen_rtx_AND (vmode, x, op0);
17230 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17234 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
17236 x = gen_rtx_AND (vmode, scratch, mask);
17238 else /* alternative 2,4 */
17240 gcc_assert (REGNO (mask) == REGNO (scratch));
17241 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
17242 x = gen_rtx_AND (vmode, scratch, op1);
17244 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
17246 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
17248 dest = simplify_gen_subreg (vmode, op0, mode, 0);
17249 x = gen_rtx_AND (vmode, dest, nmask);
17251 else /* alternative 3,4 */
17253 gcc_assert (REGNO (nmask) == REGNO (dest));
17255 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
17256 x = gen_rtx_AND (vmode, dest, op0);
17258 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17261 x = gen_rtx_IOR (vmode, dest, scratch);
17262 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
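/* Editor's sketch of what both copysign splitters compute, namely
   (op1 & signmask) | (op0 & ~signmask), in scalar form.  Not GCC code;
   sketch_copysign is a hypothetical name.  */

static float
sketch_copysign (float mag, float sgn)
{
  union { float f; unsigned int i; } a, b;

  a.f = mag;
  b.f = sgn;
  a.i = (a.i & ~0x80000000u) | (b.i & 0x80000000u);
  return a.f;
}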
17265 /* Return TRUE or FALSE depending on whether the first SET in INSN
17266 has source and destination with matching CC modes and whether the
17267 CC mode is at least as constrained as REQ_MODE. */
17270 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
17273 enum machine_mode set_mode;
17275 set = PATTERN (insn);
17276 if (GET_CODE (set) == PARALLEL)
17277 set = XVECEXP (set, 0, 0);
17278 gcc_assert (GET_CODE (set) == SET);
17279 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
17281 set_mode = GET_MODE (SET_DEST (set));
17285 if (req_mode != CCNOmode
17286 && (req_mode != CCmode
17287 || XEXP (SET_SRC (set), 1) != const0_rtx))
17291 if (req_mode == CCGCmode)
17295 if (req_mode == CCGOCmode || req_mode == CCNOmode)
17299 if (req_mode == CCZmode)
17309 if (set_mode != req_mode)
17314 gcc_unreachable ();
17317 return GET_MODE (SET_SRC (set)) == set_mode;
17320 /* Generate insn patterns to do an integer compare of OPERANDS. */
17323 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
17325 enum machine_mode cmpmode;
17328 cmpmode = SELECT_CC_MODE (code, op0, op1);
17329 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
17331 /* This is very simple, but making the interface the same as in the
17332 FP case makes the rest of the code easier. */
17333 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
17334 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
17336 /* Return the test that should be put into the flags user, i.e.
17337 the bcc, scc, or cmov instruction. */
17338 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
17341 /* Figure out whether to use ordered or unordered fp comparisons.
17342 Return the appropriate mode to use. */
17345 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
17347 /* ??? In order to make all comparisons reversible, we do all comparisons
17348 non-trapping when compiling for IEEE. Once gcc is able to distinguish
17349 all forms of trapping and nontrapping comparisons, we can make inequality
17350 comparisons trapping again, since that results in better code when using
17351 FCOM based compares. */
17352 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
17356 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
17358 enum machine_mode mode = GET_MODE (op0);
17360 if (SCALAR_FLOAT_MODE_P (mode))
17362 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
17363 return ix86_fp_compare_mode (code);
17368 /* Only zero flag is needed. */
17369 case EQ: /* ZF=0 */
17370 case NE: /* ZF!=0 */
17372 /* Codes needing carry flag. */
17373 case GEU: /* CF=0 */
17374 case LTU: /* CF=1 */
17375 /* Detect overflow checks. They need just the carry flag. */
17376 if (GET_CODE (op0) == PLUS
17377 && rtx_equal_p (op1, XEXP (op0, 0)))
17381 case GTU: /* CF=0 & ZF=0 */
17382 case LEU: /* CF=1 | ZF=1 */
17383 /* Detect overflow checks. They need just the carry flag. */
17384 if (GET_CODE (op0) == MINUS
17385 && rtx_equal_p (op1, XEXP (op0, 0)))
17389 /* Codes possibly doable only with sign flag when
17390 comparing against zero. */
17391 case GE: /* SF=OF or SF=0 */
17392 case LT: /* SF<>OF or SF=1 */
17393 if (op1 == const0_rtx)
17396 /* For other cases the carry flag is not required. */
17398 /* Codes doable only with the sign flag when comparing
17399 against zero, but we miss the jump instruction for it,
17400 so we need to use relational tests against overflow,
17401 which thus needs to be zero. */
17402 case GT: /* ZF=0 & SF=OF */
17403 case LE: /* ZF=1 | SF<>OF */
17404 if (op1 == const0_rtx)
17408 /* The strcmp pattern does (use flags), and combine may ask us for the proper mode. */
17413 gcc_unreachable ();
17417 /* Return the fixed registers used for condition codes. */
17420 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
17427 /* If two condition code modes are compatible, return a condition code
17428 mode which is compatible with both. Otherwise, return VOIDmode. */
17431 static enum machine_mode
17432 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
17437 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
17440 if ((m1 == CCGCmode && m2 == CCGOCmode)
17441 || (m1 == CCGOCmode && m2 == CCGCmode))
17447 gcc_unreachable ();
17477 /* These are only compatible with themselves, which we already checked above. */
17484 /* Return a comparison we can do that is equivalent to
17485 swap_condition (code), except possibly for orderedness.
17486 But never change orderedness if TARGET_IEEE_FP, returning
17487 UNKNOWN in that case if necessary. */
17489 static enum rtx_code
17490 ix86_fp_swap_condition (enum rtx_code code)
17494 case GT: /* GTU - CF=0 & ZF=0 */
17495 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
17496 case GE: /* GEU - CF=0 */
17497 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
17498 case UNLT: /* LTU - CF=1 */
17499 return TARGET_IEEE_FP ? UNKNOWN : GT;
17500 case UNLE: /* LEU - CF=1 | ZF=1 */
17501 return TARGET_IEEE_FP ? UNKNOWN : GE;
17503 return swap_condition (code);
17507 /* Return the cost of comparison CODE using the best strategy for performance.
17508 All of the following functions use the number of instructions as the cost metric.
17509 In the future this should be tweaked to compute bytes for optimize_size and
17510 to take into account the performance of various instructions on various CPUs. */
17513 ix86_fp_comparison_cost (enum rtx_code code)
17517 /* The cost of code using bit-twiddling on %ah. */
17534 arith_cost = TARGET_IEEE_FP ? 5 : 4;
17538 arith_cost = TARGET_IEEE_FP ? 6 : 4;
17541 gcc_unreachable ();
17544 switch (ix86_fp_comparison_strategy (code))
17546 case IX86_FPCMP_COMI:
17547 return arith_cost > 4 ? 3 : 2;
17548 case IX86_FPCMP_SAHF:
17549 return arith_cost > 4 ? 4 : 3;
17555 /* Return strategy to use for floating-point. We assume that fcomi is always
17556 preferable where available, since that is also true when looking at size
17557 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
17559 enum ix86_fpcmp_strategy
17560 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
17562 /* Do fcomi/sahf based test when profitable. */
17565 return IX86_FPCMP_COMI;
17567 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
17568 return IX86_FPCMP_SAHF;
17570 return IX86_FPCMP_ARITH;
17573 /* Swap, force into registers, or otherwise massage the two operands
17574 to a fp comparison. The operands are updated in place; the new
17575 comparison code is returned. */
17577 static enum rtx_code
17578 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
17580 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
17581 rtx op0 = *pop0, op1 = *pop1;
17582 enum machine_mode op_mode = GET_MODE (op0);
17583 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
17585 /* All of the unordered compare instructions only work on registers.
17586 The same is true of the fcomi compare instructions. The XFmode
17587 compare instructions require registers except when comparing
17588 against zero or when converting operand 1 from fixed point to floating point. */
17592 && (fpcmp_mode == CCFPUmode
17593 || (op_mode == XFmode
17594 && ! (standard_80387_constant_p (op0) == 1
17595 || standard_80387_constant_p (op1) == 1)
17596 && GET_CODE (op1) != FLOAT)
17597 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
17599 op0 = force_reg (op_mode, op0);
17600 op1 = force_reg (op_mode, op1);
17604 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
17605 things around if they appear profitable, otherwise force op0
17606 into a register. */
17608 if (standard_80387_constant_p (op0) == 0
17610 && ! (standard_80387_constant_p (op1) == 0
17613 enum rtx_code new_code = ix86_fp_swap_condition (code);
17614 if (new_code != UNKNOWN)
17617 tmp = op0, op0 = op1, op1 = tmp;
17623 op0 = force_reg (op_mode, op0);
17625 if (CONSTANT_P (op1))
17627 int tmp = standard_80387_constant_p (op1);
17629 op1 = validize_mem (force_const_mem (op_mode, op1));
17633 op1 = force_reg (op_mode, op1);
17636 op1 = force_reg (op_mode, op1);
17640 /* Try to rearrange the comparison to make it cheaper. */
17641 if (ix86_fp_comparison_cost (code)
17642 > ix86_fp_comparison_cost (swap_condition (code))
17643 && (REG_P (op1) || can_create_pseudo_p ()))
17646 tmp = op0, op0 = op1, op1 = tmp;
17647 code = swap_condition (code);
17649 op0 = force_reg (op_mode, op0);
17657 /* Convert comparison codes we use to represent FP comparison to integer
17658 code that will result in a proper branch. Return UNKNOWN if no such code is available. */
17662 ix86_fp_compare_code_to_integer (enum rtx_code code)
17691 /* Generate insn patterns to do a floating point compare of OPERANDS. */
17694 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
17696 enum machine_mode fpcmp_mode, intcmp_mode;
17699 fpcmp_mode = ix86_fp_compare_mode (code);
17700 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
17702 /* Do fcomi/sahf based test when profitable. */
17703 switch (ix86_fp_comparison_strategy (code))
17705 case IX86_FPCMP_COMI:
17706 intcmp_mode = fpcmp_mode;
17707 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17708 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17713 case IX86_FPCMP_SAHF:
17714 intcmp_mode = fpcmp_mode;
17715 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17716 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17720 scratch = gen_reg_rtx (HImode);
17721 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
17722 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
17725 case IX86_FPCMP_ARITH:
17726 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
17727 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17728 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
17730 scratch = gen_reg_rtx (HImode);
17731 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
17733 /* In the unordered case, we have to check C2 for NaNs, which
17734 doesn't happen to work out to anything nice combination-wise.
17735 So do some bit twiddling on the value we've got in AH to come
17736 up with an appropriate set of condition codes. */
17738 intcmp_mode = CCNOmode;
17743 if (code == GT || !TARGET_IEEE_FP)
17745 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17750 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17751 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17752 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
17753 intcmp_mode = CCmode;
17759 if (code == LT && TARGET_IEEE_FP)
17761 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17762 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
17763 intcmp_mode = CCmode;
17768 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
17774 if (code == GE || !TARGET_IEEE_FP)
17776 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
17781 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17782 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
17788 if (code == LE && TARGET_IEEE_FP)
17790 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17791 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17792 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17793 intcmp_mode = CCmode;
17798 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17804 if (code == EQ && TARGET_IEEE_FP)
17806 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17807 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17808 intcmp_mode = CCmode;
17813 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17819 if (code == NE && TARGET_IEEE_FP)
17821 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17822 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
17828 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17834 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17838 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17843 gcc_unreachable ();
17851 /* Return the test that should be put into the flags user, i.e.
17852 the bcc, scc, or cmov instruction. */
17853 return gen_rtx_fmt_ee (code, VOIDmode,
17854 gen_rtx_REG (intcmp_mode, FLAGS_REG),
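/* [Editorial sketch, not from GCC] Key to the magic constants above:
   fnstsw copies the FPU condition bits into AH as C0 = 0x01, C2 = 0x04
   and C3 = 0x40, so 0x45 selects all three.  A hypothetical AH decoder:  */
static int
example_fcom_unordered_p (unsigned char ah)
{
  /* An fcom of NaN operands sets C3, C2 and C0 simultaneously, so the
     unordered result is recognized as (ah & 0x45) == 0x45.  */
  return (ah & 0x45) == 0x45;
}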
17859 ix86_expand_compare (enum rtx_code code, rtx op0, rtx op1)
17863 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
17864 ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
17866 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
17868 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
17869 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
17872 ret = ix86_expand_int_compare (code, op0, op1);
17878 ix86_expand_branch (enum rtx_code code, rtx op0, rtx op1, rtx label)
17880 enum machine_mode mode = GET_MODE (op0);
17892 tmp = ix86_expand_compare (code, op0, op1);
17893 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
17894 gen_rtx_LABEL_REF (VOIDmode, label),
17896 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
17903 /* Expand DImode branch into multiple compare+branch. */
17905 rtx lo[2], hi[2], label2;
17906 enum rtx_code code1, code2, code3;
17907 enum machine_mode submode;
17909 if (CONSTANT_P (op0) && !CONSTANT_P (op1))
17911 tmp = op0, op0 = op1, op1 = tmp;
17912 code = swap_condition (code);
17915 split_double_mode (mode, &op0, 1, lo+0, hi+0);
17916 split_double_mode (mode, &op1, 1, lo+1, hi+1);
17918 submode = mode == DImode ? SImode : DImode;
17920 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
17921 avoid two branches. This costs one extra insn, so disable when
17922 optimizing for size. */
17924 if ((code == EQ || code == NE)
17925 && (!optimize_insn_for_size_p ()
17926 || hi[1] == const0_rtx || lo[1] == const0_rtx))
17931 if (hi[1] != const0_rtx)
17932 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
17933 NULL_RTX, 0, OPTAB_WIDEN);
17936 if (lo[1] != const0_rtx)
17937 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
17938 NULL_RTX, 0, OPTAB_WIDEN);
17940 tmp = expand_binop (submode, ior_optab, xor1, xor0,
17941 NULL_RTX, 0, OPTAB_WIDEN);
17943 ix86_expand_branch (code, tmp, const0_rtx, label);
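/* [Editorial sketch, not from GCC] Scalar shape of the branch-saving
   trick above for a double-word equality test:  */
static int
example_double_word_eq (unsigned int lo0, unsigned int hi0,
                        unsigned int lo1, unsigned int hi1)
{
  /* One test of the combined value replaces two compare+branch pairs.  */
  return ((hi0 ^ hi1) | (lo0 ^ lo1)) == 0;
}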
17947 /* Otherwise, if we are doing a less-than or greater-than-or-equal
17948 comparison, op1 is a constant, and the low word is zero, then we can
17949 just examine the high word. Similarly for a low word of -1 and
17950 less-than-or-equal or greater-than. */
17952 if (CONST_INT_P (hi[1]))
17955 case LT: case LTU: case GE: case GEU:
17956 if (lo[1] == const0_rtx)
17958 ix86_expand_branch (code, hi[0], hi[1], label);
17962 case LE: case LEU: case GT: case GTU:
17963 if (lo[1] == constm1_rtx)
17965 ix86_expand_branch (code, hi[0], hi[1], label);
17973 /* Otherwise, we need two or three jumps. */
17975 label2 = gen_label_rtx ();
17978 code2 = swap_condition (code);
17979 code3 = unsigned_condition (code);
17983 case LT: case GT: case LTU: case GTU:
17986 case LE: code1 = LT; code2 = GT; break;
17987 case GE: code1 = GT; code2 = LT; break;
17988 case LEU: code1 = LTU; code2 = GTU; break;
17989 case GEU: code1 = GTU; code2 = LTU; break;
17991 case EQ: code1 = UNKNOWN; code2 = NE; break;
17992 case NE: code2 = UNKNOWN; break;
17995 gcc_unreachable ();
18000 * if (hi(a) < hi(b)) goto true;
18001 * if (hi(a) > hi(b)) goto false;
18002 * if (lo(a) < lo(b)) goto true;
18006 if (code1 != UNKNOWN)
18007 ix86_expand_branch (code1, hi[0], hi[1], label);
18008 if (code2 != UNKNOWN)
18009 ix86_expand_branch (code2, hi[0], hi[1], label2);
18011 ix86_expand_branch (code3, lo[0], lo[1], label);
18013 if (code2 != UNKNOWN)
18014 emit_label (label2);
18019 gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC);
18024 /* Split branch based on floating point condition. */
18026 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
18027 rtx target1, rtx target2, rtx tmp, rtx pushed)
18032 if (target2 != pc_rtx)
18035 code = reverse_condition_maybe_unordered (code);
18040 condition = ix86_expand_fp_compare (code, op1, op2,
18043 /* Remove pushed operand from stack. */
18045 ix86_free_from_memory (GET_MODE (pushed));
18047 i = emit_jump_insn (gen_rtx_SET
18049 gen_rtx_IF_THEN_ELSE (VOIDmode,
18050 condition, target1, target2)));
18051 if (split_branch_probability >= 0)
18052 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
18056 ix86_expand_setcc (rtx dest, enum rtx_code code, rtx op0, rtx op1)
18060 gcc_assert (GET_MODE (dest) == QImode);
18062 ret = ix86_expand_compare (code, op0, op1);
18063 PUT_MODE (ret, QImode);
18064 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
18067 /* Expand a comparison setting or clearing the carry flag. Return true
18068 when successful, storing the resulting comparison in *POP. */
18070 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
18072 enum machine_mode mode =
18073 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
18075 /* Do not handle double-mode compares that go through a special path. */
18076 if (mode == (TARGET_64BIT ? TImode : DImode))
18079 if (SCALAR_FLOAT_MODE_P (mode))
18081 rtx compare_op, compare_seq;
18083 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
18085 /* Shortcut: the following common codes never translate
18086 into carry-flag compares. */
18087 if (code == EQ || code == NE || code == UNEQ || code == LTGT
18088 || code == ORDERED || code == UNORDERED)
18091 /* These comparisons require the zero flag; swap operands so they won't. */
18092 if ((code == GT || code == UNLE || code == LE || code == UNGT)
18093 && !TARGET_IEEE_FP)
18098 code = swap_condition (code);
18101 /* Try to expand the comparison and verify that we end up with
18102 a carry-flag-based comparison. This fails only when we decide
18103 to expand the comparison using arithmetic, which is not a
18104 common scenario. */
18106 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
18107 compare_seq = get_insns ();
18110 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
18111 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
18112 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
18114 code = GET_CODE (compare_op);
18116 if (code != LTU && code != GEU)
18119 emit_insn (compare_seq);
18124 if (!INTEGRAL_MODE_P (mode))
18133 /* Convert a==0 into (unsigned)a<1. */
18136 if (op1 != const0_rtx)
18139 code = (code == EQ ? LTU : GEU);
18142 /* Convert a>b into b<a or a>=b+1. */
18145 if (CONST_INT_P (op1))
18147 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
18148 /* Bail out on overflow. We still can swap operands but that
18149 would force loading of the constant into a register. */
18150 if (op1 == const0_rtx
18151 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
18153 code = (code == GTU ? GEU : LTU);
18160 code = (code == GTU ? LTU : GEU);
18164 /* Convert a>=0 into (unsigned)a<0x80000000. */
18167 if (mode == DImode || op1 != const0_rtx)
18169 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
18170 code = (code == LT ? GEU : LTU);
18174 if (mode == DImode || op1 != constm1_rtx)
18176 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
18177 code = (code == LE ? GEU : LTU);
18183 /* Swapping operands may cause a constant to appear as the first operand. */
18184 if (!nonimmediate_operand (op0, VOIDmode))
18186 if (!can_create_pseudo_p ())
18188 op0 = force_reg (mode, op0);
18190 *pop = ix86_expand_compare (code, op0, op1);
18191 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
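/* [Editorial sketch, not from GCC] The rewrites above lean on these
   unsigned identities, shown here for 32-bit operands; each right-hand
   side is an LTU or GEU test, i.e. a bare carry-flag compare:  */
static int
example_carry_identities (unsigned int a, unsigned int b)
{
  int ok = 1;
  ok &= (a == 0) == (a < 1);                    /* EQ -> LTU  */
  if (b != 0xffffffffu)                         /* no wrap    */
    ok &= (a > b) == (a >= b + 1);              /* GTU -> GEU */
  ok &= ((int) a >= 0) == (a < 0x80000000u);    /* GE -> LTU  */
  return ok;                                    /* always 1   */
}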
18196 ix86_expand_int_movcc (rtx operands[])
18198 enum rtx_code code = GET_CODE (operands[1]), compare_code;
18199 rtx compare_seq, compare_op;
18200 enum machine_mode mode = GET_MODE (operands[0]);
18201 bool sign_bit_compare_p = false;
18202 rtx op0 = XEXP (operands[1], 0);
18203 rtx op1 = XEXP (operands[1], 1);
18206 compare_op = ix86_expand_compare (code, op0, op1);
18207 compare_seq = get_insns ();
18210 compare_code = GET_CODE (compare_op);
18212 if ((op1 == const0_rtx && (code == GE || code == LT))
18213 || (op1 == constm1_rtx && (code == GT || code == LE)))
18214 sign_bit_compare_p = true;
18216 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
18217 HImode insns, we'd be swallowed in word prefix ops. */
18219 if ((mode != HImode || TARGET_FAST_PREFIX)
18220 && (mode != (TARGET_64BIT ? TImode : DImode))
18221 && CONST_INT_P (operands[2])
18222 && CONST_INT_P (operands[3]))
18224 rtx out = operands[0];
18225 HOST_WIDE_INT ct = INTVAL (operands[2]);
18226 HOST_WIDE_INT cf = INTVAL (operands[3]);
18227 HOST_WIDE_INT diff;
18230 /* Sign-bit compares are better done using shifts than by using sbb. */
18232 if (sign_bit_compare_p
18233 || ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
18235 /* Detect overlap between destination and compare sources. */
18238 if (!sign_bit_compare_p)
18241 bool fpcmp = false;
18243 compare_code = GET_CODE (compare_op);
18245 flags = XEXP (compare_op, 0);
18247 if (GET_MODE (flags) == CCFPmode
18248 || GET_MODE (flags) == CCFPUmode)
18252 = ix86_fp_compare_code_to_integer (compare_code);
18255 /* To simplify the rest of the code, restrict to the GEU case. */
18256 if (compare_code == LTU)
18258 HOST_WIDE_INT tmp = ct;
18261 compare_code = reverse_condition (compare_code);
18262 code = reverse_condition (code);
18267 PUT_CODE (compare_op,
18268 reverse_condition_maybe_unordered
18269 (GET_CODE (compare_op)));
18271 PUT_CODE (compare_op,
18272 reverse_condition (GET_CODE (compare_op)));
18276 if (reg_overlap_mentioned_p (out, op0)
18277 || reg_overlap_mentioned_p (out, op1))
18278 tmp = gen_reg_rtx (mode);
18280 if (mode == DImode)
18281 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
18283 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
18284 flags, compare_op));
18288 if (code == GT || code == GE)
18289 code = reverse_condition (code);
18292 HOST_WIDE_INT tmp = ct;
18297 tmp = emit_store_flag (tmp, code, op0, op1, VOIDmode, 0, -1);
18310 tmp = expand_simple_binop (mode, PLUS,
18312 copy_rtx (tmp), 1, OPTAB_DIRECT);
18323 tmp = expand_simple_binop (mode, IOR,
18325 copy_rtx (tmp), 1, OPTAB_DIRECT);
18327 else if (diff == -1 && ct)
18337 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
18339 tmp = expand_simple_binop (mode, PLUS,
18340 copy_rtx (tmp), GEN_INT (cf),
18341 copy_rtx (tmp), 1, OPTAB_DIRECT);
18349 * andl cf - ct, dest
18359 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
18362 tmp = expand_simple_binop (mode, AND,
18364 gen_int_mode (cf - ct, mode),
18365 copy_rtx (tmp), 1, OPTAB_DIRECT);
18367 tmp = expand_simple_binop (mode, PLUS,
18368 copy_rtx (tmp), GEN_INT (ct),
18369 copy_rtx (tmp), 1, OPTAB_DIRECT);
18372 if (!rtx_equal_p (tmp, out))
18373 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
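/* [Editorial sketch, not from GCC] Plain-C shape of the sbb sequence
   emitted above: the carry becomes an all-ones/all-zeros mask which is
   then folded into either constant without a branch:  */
static unsigned int
example_sbb_select (unsigned int a, unsigned int b,
                    unsigned int ct, unsigned int cf)
{
  /* "cmpl b, a; sbbl tmp, tmp" leaves tmp = (a < b) ? -1 : 0.  */
  unsigned int mask = (a < b) ? 0xffffffffu : 0u;
  /* mask & (ct - cf) is ct - cf or 0; adding cf yields ct or cf.  */
  return (mask & (ct - cf)) + cf;
}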
18380 enum machine_mode cmp_mode = GET_MODE (op0);
18383 tmp = ct, ct = cf, cf = tmp;
18386 if (SCALAR_FLOAT_MODE_P (cmp_mode))
18388 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
18390 /* We may be reversing an unordered compare to a normal compare, which
18391 is not valid in general (we may convert a non-trapping condition
18392 to a trapping one); however, on i386 we currently emit all
18393 comparisons unordered. */
18394 compare_code = reverse_condition_maybe_unordered (compare_code);
18395 code = reverse_condition_maybe_unordered (code);
18399 compare_code = reverse_condition (compare_code);
18400 code = reverse_condition (code);
18404 compare_code = UNKNOWN;
18405 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
18406 && CONST_INT_P (op1))
18408 if (op1 == const0_rtx
18409 && (code == LT || code == GE))
18410 compare_code = code;
18411 else if (op1 == constm1_rtx)
18415 else if (code == GT)
18420 /* Optimize dest = (op0 < 0) ? -1 : cf. */
18421 if (compare_code != UNKNOWN
18422 && GET_MODE (op0) == GET_MODE (out)
18423 && (cf == -1 || ct == -1))
18425 /* If lea code below could be used, only optimize
18426 if it results in a 2 insn sequence. */
18428 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
18429 || diff == 3 || diff == 5 || diff == 9)
18430 || (compare_code == LT && ct == -1)
18431 || (compare_code == GE && cf == -1))
18434 * notl op1 (if necessary)
18442 code = reverse_condition (code);
18445 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18447 out = expand_simple_binop (mode, IOR,
18449 out, 1, OPTAB_DIRECT);
18450 if (out != operands[0])
18451 emit_move_insn (operands[0], out);
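/* [Editorial sketch, not from GCC] The sign-bit special case above,
   dest = (x < 0) ? -1 : cf, with no compare instruction at all:  */
static int
example_sar_or_select (int x, int cf)
{
  /* x >> 31 yields -1 for negative x and 0 otherwise (arithmetic
     shift; implementation-defined in ISO C, but what GCC does).  */
  return (x >> 31) | cf;
}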
18458 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
18459 || diff == 3 || diff == 5 || diff == 9)
18460 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
18462 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
18468 * lea cf(dest*(ct-cf)),dest
18472 * This also catches the degenerate setcc-only case.
18478 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
18481 /* On x86_64 the lea instruction operates on Pmode, so we need
18482 to get the arithmetic done in the proper mode to match. */
18484 tmp = copy_rtx (out);
18488 out1 = copy_rtx (out);
18489 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
18493 tmp = gen_rtx_PLUS (mode, tmp, out1);
18499 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
18502 if (!rtx_equal_p (tmp, out))
18505 out = force_operand (tmp, copy_rtx (out));
18507 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
18509 if (!rtx_equal_p (out, operands[0]))
18510 emit_move_insn (operands[0], copy_rtx (out));
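/* [Editorial sketch, not from GCC] The lea form computes
   dest = cf + setcc * (ct - cf); for diff = ct - cf in
   {1, 2, 4, 8, 3, 5, 9} a single lea does the multiply-add:  */
static int
example_lea_select (int cond, int ct, int cf)
{
  int t = cond ? 1 : 0;         /* setcc                           */
  return cf + t * (ct - cf);    /* e.g. "leal cf(%eax,%eax,2)" for diff 3 */
}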
18516 * General case: Jumpful:
18517 * xorl dest,dest cmpl op1, op2
18518 * cmpl op1, op2 movl ct, dest
18519 * setcc dest jcc 1f
18520 * decl dest movl cf, dest
18521 * andl (cf-ct),dest 1:
18524 * Size 20. Size 14.
18526 * This is reasonably steep, but branch mispredict costs are
18527 * high on modern cpus, so consider failing only if optimizing for size.
18531 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
18532 && BRANCH_COST (optimize_insn_for_speed_p (),
18537 enum machine_mode cmp_mode = GET_MODE (op0);
18542 if (SCALAR_FLOAT_MODE_P (cmp_mode))
18544 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
18546 /* We may be reversing an unordered compare to a normal compare,
18547 which is not valid in general (we may convert a non-trapping
18548 condition to a trapping one); however, on i386 we currently
18549 emit all comparisons unordered. */
18550 code = reverse_condition_maybe_unordered (code);
18554 code = reverse_condition (code);
18555 if (compare_code != UNKNOWN)
18556 compare_code = reverse_condition (compare_code);
18560 if (compare_code != UNKNOWN)
18562 /* notl op1 (if needed)
18567 For x < 0 (resp. x <= -1) there will be no notl,
18568 so if possible swap the constants to get rid of the complement.
18570 True/false will be -1/0 while code below (store flag
18571 followed by decrement) is 0/-1, so the constants need
18572 to be exchanged once more. */
18574 if (compare_code == GE || !cf)
18576 code = reverse_condition (code);
18581 HOST_WIDE_INT tmp = cf;
18586 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18590 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
18592 out = expand_simple_binop (mode, PLUS, copy_rtx (out),
18594 copy_rtx (out), 1, OPTAB_DIRECT);
18597 out = expand_simple_binop (mode, AND, copy_rtx (out),
18598 gen_int_mode (cf - ct, mode),
18599 copy_rtx (out), 1, OPTAB_DIRECT);
18601 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
18602 copy_rtx (out), 1, OPTAB_DIRECT);
18603 if (!rtx_equal_p (out, operands[0]))
18604 emit_move_insn (operands[0], copy_rtx (out));
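/* [Editorial sketch, not from GCC] The jumpless general case above,
   matching the xorl/setcc/decl/andl table in the earlier comment:  */
static int
example_setcc_mask_select (int cond, int ct, int cf)
{
  int t = (cond ? 1 : 0) - 1;    /* setcc; decl -> 0 or -1  */
  return (t & (cf - ct)) + ct;   /* andl; addl  -> ct or cf */
}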
18610 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
18612 /* Try a few more things with specific constants and a variable. */
18615 rtx var, orig_out, out, tmp;
18617 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
18620 /* If one of the two operands is an interesting constant, load a
18621 constant with the above and mask it in with a logical operation. */
18623 if (CONST_INT_P (operands[2]))
18626 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
18627 operands[3] = constm1_rtx, op = and_optab;
18628 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
18629 operands[3] = const0_rtx, op = ior_optab;
18633 else if (CONST_INT_P (operands[3]))
18636 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
18637 operands[2] = constm1_rtx, op = and_optab;
18638 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
18639 operands[2] = const0_rtx, op = ior_optab;
18646 orig_out = operands[0];
18647 tmp = gen_reg_rtx (mode);
18650 /* Recurse to get the constant loaded. */
18651 if (ix86_expand_int_movcc (operands) == 0)
18654 /* Mask in the interesting variable. */
18655 out = expand_binop (mode, op, var, tmp, orig_out, 0,
18657 if (!rtx_equal_p (out, orig_out))
18658 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
18664 * For comparison with above,
18674 if (! nonimmediate_operand (operands[2], mode))
18675 operands[2] = force_reg (mode, operands[2]);
18676 if (! nonimmediate_operand (operands[3], mode))
18677 operands[3] = force_reg (mode, operands[3]);
18679 if (! register_operand (operands[2], VOIDmode)
18681 || ! register_operand (operands[3], VOIDmode)))
18682 operands[2] = force_reg (mode, operands[2]);
18685 && ! register_operand (operands[3], VOIDmode))
18686 operands[3] = force_reg (mode, operands[3]);
18688 emit_insn (compare_seq);
18689 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18690 gen_rtx_IF_THEN_ELSE (mode,
18691 compare_op, operands[2],
18696 /* Swap, force into registers, or otherwise massage the two operands
18697 to an sse comparison with a mask result. Thus we differ a bit from
18698 ix86_prepare_fp_compare_args which expects to produce a flags result.
18700 The DEST operand exists to help determine whether to commute commutative
18701 operators. The POP0/POP1 operands are updated in place. The new
18702 comparison code is returned, or UNKNOWN if not implementable. */
18704 static enum rtx_code
18705 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
18706 rtx *pop0, rtx *pop1)
18714 /* We have no LTGT as an operator. We could implement it with
18715 NE & ORDERED, but this requires an extra temporary. It's
18716 not clear that it's worth it. */
18723 /* These are supported directly. */
18730 /* For commutative operators, try to canonicalize the destination
18731 operand to be first in the comparison - this helps reload to
18732 avoid extra moves. */
18733 if (!dest || !rtx_equal_p (dest, *pop1))
18741 /* These are not supported directly. Swap the comparison operands
18742 to transform into something that is supported. */
18746 code = swap_condition (code);
18750 gcc_unreachable ();
18756 /* Detect conditional moves that exactly match min/max operational
18757 semantics. Note that this is IEEE safe, as long as we don't
18758 interchange the operands.
18760 Returns FALSE if this conditional move doesn't match a MIN/MAX,
18761 and TRUE if the operation is successful and instructions are emitted. */
18764 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
18765 rtx cmp_op1, rtx if_true, rtx if_false)
18767 enum machine_mode mode;
18773 else if (code == UNGE)
18776 if_true = if_false;
18782 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
18784 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
18789 mode = GET_MODE (dest);
18791 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
18792 but MODE may be a vector mode and thus not appropriate. */
18793 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
18795 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
18798 if_true = force_reg (mode, if_true);
18799 v = gen_rtvec (2, if_true, if_false);
18800 tmp = gen_rtx_UNSPEC (mode, v, u);
18804 code = is_min ? SMIN : SMAX;
18805 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
18808 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
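/* [Editorial sketch, not from GCC] The operand-order caveat above in
   scalar form: minss/maxss return the second source on unordered
   operands (and on +/-0 ties), which is exactly C's conditional:  */
static float
example_minss_model (float a, float b)
{
  /* With a NaN anywhere, a < b is false and b is returned; swapping
     the operands therefore changes the result, hence "IEEE safe as
     long as we don't interchange the operands".  */
  return a < b ? a : b;
}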
18812 /* Expand an sse vector comparison. Return the register with the result. */
18815 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
18816 rtx op_true, rtx op_false)
18818 enum machine_mode mode = GET_MODE (dest);
18821 cmp_op0 = force_reg (mode, cmp_op0);
18822 if (!nonimmediate_operand (cmp_op1, mode))
18823 cmp_op1 = force_reg (mode, cmp_op1);
18826 || reg_overlap_mentioned_p (dest, op_true)
18827 || reg_overlap_mentioned_p (dest, op_false))
18828 dest = gen_reg_rtx (mode);
18830 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
18831 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18836 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
18837 operations. This is used for both scalar and vector conditional moves. */
18840 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
18842 enum machine_mode mode = GET_MODE (dest);
18845 if (op_false == CONST0_RTX (mode))
18847 op_true = force_reg (mode, op_true);
18848 x = gen_rtx_AND (mode, cmp, op_true);
18849 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18851 else if (op_true == CONST0_RTX (mode))
18853 op_false = force_reg (mode, op_false);
18854 x = gen_rtx_NOT (mode, cmp);
18855 x = gen_rtx_AND (mode, x, op_false);
18856 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18858 else if (TARGET_XOP)
18860 rtx pcmov = gen_rtx_SET (mode, dest,
18861 gen_rtx_IF_THEN_ELSE (mode, cmp,
18868 op_true = force_reg (mode, op_true);
18869 op_false = force_reg (mode, op_false);
18871 t2 = gen_reg_rtx (mode);
18873 t3 = gen_reg_rtx (mode);
18877 x = gen_rtx_AND (mode, op_true, cmp);
18878 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
18880 x = gen_rtx_NOT (mode, cmp);
18881 x = gen_rtx_AND (mode, x, op_false);
18882 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
18884 x = gen_rtx_IOR (mode, t3, t2);
18885 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
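/* [Editorial sketch, not from GCC] Scalar model of the and/andnot/or
   select just built, where CMP is the all-ones/all-zeros lane mask an
   SSE compare produces:  */
static unsigned int
example_mask_select (unsigned int cmp, unsigned int t, unsigned int f)
{
  return (cmp & t) | (~cmp & f);   /* cmp ? t : f, bitwise */
}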
18889 /* Expand a floating-point conditional move. Return true if successful. */
18892 ix86_expand_fp_movcc (rtx operands[])
18894 enum machine_mode mode = GET_MODE (operands[0]);
18895 enum rtx_code code = GET_CODE (operands[1]);
18896 rtx tmp, compare_op;
18897 rtx op0 = XEXP (operands[1], 0);
18898 rtx op1 = XEXP (operands[1], 1);
18900 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
18902 enum machine_mode cmode;
18904 /* Since we've no cmove for sse registers, don't force bad register
18905 allocation just to gain access to it. Deny movcc when the
18906 comparison mode doesn't match the move mode. */
18907 cmode = GET_MODE (op0);
18908 if (cmode == VOIDmode)
18909 cmode = GET_MODE (op1);
18913 code = ix86_prepare_sse_fp_compare_args (operands[0], code, &op0, &op1);
18914 if (code == UNKNOWN)
18917 if (ix86_expand_sse_fp_minmax (operands[0], code, op0, op1,
18918 operands[2], operands[3]))
18921 tmp = ix86_expand_sse_cmp (operands[0], code, op0, op1,
18922 operands[2], operands[3]);
18923 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
18927 /* The floating point conditional move instructions don't directly
18928 support conditions resulting from a signed integer comparison. */
18930 compare_op = ix86_expand_compare (code, op0, op1);
18931 if (!fcmov_comparison_operator (compare_op, VOIDmode))
18933 tmp = gen_reg_rtx (QImode);
18934 ix86_expand_setcc (tmp, code, op0, op1);
18936 compare_op = ix86_expand_compare (NE, tmp, const0_rtx);
18939 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18940 gen_rtx_IF_THEN_ELSE (mode, compare_op,
18941 operands[2], operands[3])));
18946 /* Expand a floating-point vector conditional move; a vcond operation
18947 rather than a movcc operation. */
18950 ix86_expand_fp_vcond (rtx operands[])
18952 enum rtx_code code = GET_CODE (operands[3]);
18955 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
18956 &operands[4], &operands[5]);
18957 if (code == UNKNOWN)
18960 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
18961 operands[5], operands[1], operands[2]))
18964 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
18965 operands[1], operands[2]);
18966 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
18970 /* Expand a signed/unsigned integral vector conditional move. */
18973 ix86_expand_int_vcond (rtx operands[])
18975 enum machine_mode mode = GET_MODE (operands[0]);
18976 enum rtx_code code = GET_CODE (operands[3]);
18977 bool negate = false;
18980 cop0 = operands[4];
18981 cop1 = operands[5];
18983 /* XOP supports all of the comparisons on all vector int types. */
18986 /* Canonicalize the comparison to EQ, GT, GTU. */
18997 code = reverse_condition (code);
19003 code = reverse_condition (code);
19009 code = swap_condition (code);
19010 x = cop0, cop0 = cop1, cop1 = x;
19014 gcc_unreachable ();
19017 /* Only SSE4.1/SSE4.2 supports V2DImode. */
19018 if (mode == V2DImode)
19023 /* SSE4.1 supports EQ. */
19024 if (!TARGET_SSE4_1)
19030 /* SSE4.2 supports GT/GTU. */
19031 if (!TARGET_SSE4_2)
19036 gcc_unreachable ();
19040 /* Unsigned parallel compare is not supported by the hardware.
19041 Play some tricks to turn this into a signed comparison against 0. */
19045 cop0 = force_reg (mode, cop0);
19053 rtx (*gen_sub3) (rtx, rtx, rtx);
19055 /* Subtract (-(INT MAX) - 1) from both operands to make them signed. */
19057 mask = ix86_build_signbit_mask (mode, true, false);
19058 gen_sub3 = (mode == V4SImode
19059 ? gen_subv4si3 : gen_subv2di3);
19060 t1 = gen_reg_rtx (mode);
19061 emit_insn (gen_sub3 (t1, cop0, mask));
19063 t2 = gen_reg_rtx (mode);
19064 emit_insn (gen_sub3 (t2, cop1, mask));
19074 /* Perform a parallel unsigned saturating subtraction. */
19075 x = gen_reg_rtx (mode);
19076 emit_insn (gen_rtx_SET (VOIDmode, x,
19077 gen_rtx_US_MINUS (mode, cop0, cop1)));
19080 cop1 = CONST0_RTX (mode);
19086 gcc_unreachable ();
19091 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
19092 operands[1+negate], operands[2-negate]);
19094 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
19095 operands[2-negate]);
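/* [Editorial sketch, not from GCC] The signbit-mask subtraction above
   in scalar form: biasing both operands by INT_MIN turns an unsigned
   order into the signed order that pre-SSE4 compares can test:  */
static int
example_biased_compare (unsigned int a, unsigned int b)
{
  /* XOR with 0x80000000 equals subtracting INT_MIN modulo 2^32;
     the casts assume two's-complement wraparound, as GCC guarantees.  */
  int sa = (int) (a ^ 0x80000000u);
  int sb = (int) (b ^ 0x80000000u);
  return (sa > sb) == (a > b);     /* always 1 */
}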
19099 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
19100 true if we should do zero extension, else sign extension. HIGH_P is
19101 true if we want the N/2 high elements, else the low elements. */
19104 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
19106 enum machine_mode imode = GET_MODE (operands[1]);
19111 rtx (*unpack)(rtx, rtx);
19117 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
19119 unpack = gen_sse4_1_sign_extendv8qiv8hi2;
19123 unpack = gen_sse4_1_zero_extendv4hiv4si2;
19125 unpack = gen_sse4_1_sign_extendv4hiv4si2;
19129 unpack = gen_sse4_1_zero_extendv2siv2di2;
19131 unpack = gen_sse4_1_sign_extendv2siv2di2;
19134 gcc_unreachable ();
19139 /* Shift higher 8 bytes to lower 8 bytes. */
19140 tmp = gen_reg_rtx (imode);
19141 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, tmp),
19142 gen_lowpart (V1TImode, operands[1]),
19148 emit_insn (unpack (operands[0], tmp));
19152 rtx (*unpack)(rtx, rtx, rtx);
19158 unpack = gen_vec_interleave_highv16qi;
19160 unpack = gen_vec_interleave_lowv16qi;
19164 unpack = gen_vec_interleave_highv8hi;
19166 unpack = gen_vec_interleave_lowv8hi;
19170 unpack = gen_vec_interleave_highv4si;
19172 unpack = gen_vec_interleave_lowv4si;
19175 gcc_unreachable ();
19178 dest = gen_lowpart (imode, operands[0]);
19181 tmp = force_reg (imode, CONST0_RTX (imode));
19183 tmp = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
19184 operands[1], pc_rtx, pc_rtx);
19186 emit_insn (unpack (dest, operands[1], tmp));
19190 /* Expand conditional increment or decrement using adc/sbb instructions.
19191 The default case using setcc followed by the conditional move can be
19192 done by generic code. */
19194 ix86_expand_int_addcc (rtx operands[])
19196 enum rtx_code code = GET_CODE (operands[1]);
19198 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
19200 rtx val = const0_rtx;
19201 bool fpcmp = false;
19202 enum machine_mode mode;
19203 rtx op0 = XEXP (operands[1], 0);
19204 rtx op1 = XEXP (operands[1], 1);
19206 if (operands[3] != const1_rtx
19207 && operands[3] != constm1_rtx)
19209 if (!ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
19211 code = GET_CODE (compare_op);
19213 flags = XEXP (compare_op, 0);
19215 if (GET_MODE (flags) == CCFPmode
19216 || GET_MODE (flags) == CCFPUmode)
19219 code = ix86_fp_compare_code_to_integer (code);
19226 PUT_CODE (compare_op,
19227 reverse_condition_maybe_unordered
19228 (GET_CODE (compare_op)));
19230 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
19233 mode = GET_MODE (operands[0]);
19235 /* Construct either adc or sbb insn. */
19236 if ((code == LTU) == (operands[3] == constm1_rtx))
19241 insn = gen_subqi3_carry;
19244 insn = gen_subhi3_carry;
19247 insn = gen_subsi3_carry;
19250 insn = gen_subdi3_carry;
19253 gcc_unreachable ();
19261 insn = gen_addqi3_carry;
19264 insn = gen_addhi3_carry;
19267 insn = gen_addsi3_carry;
19270 insn = gen_adddi3_carry;
19273 gcc_unreachable ();
19276 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
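/* [Editorial sketch, not from GCC] What the adc/sbb expansion above
   computes: once the condition is a bare carry test, the +/-1 is
   absorbed by the add-with-carry itself, with no setcc or branch:  */
static unsigned int
example_adc_increment (unsigned int a, unsigned int b, unsigned int x)
{
  /* "cmpl b, a" sets CF = (a < b); "adcl $0, x" then adds it in.  */
  return x + (a < b ? 1u : 0u);
}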
19282 /* Split operands 0 and 1 into half-mode parts. Similar to split_double_mode,
19283 but works for floating-point parameters and non-offsettable memories.
19284 For pushes, it returns just stack offsets; the values will be saved
19285 in the right order. At most four parts are generated. */
19288 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
19293 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
19295 size = (GET_MODE_SIZE (mode) + 4) / 8;
19297 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
19298 gcc_assert (size >= 2 && size <= 4);
19300 /* Optimize constant pool reference to immediates. This is used by fp
19301 moves, which force all constants to memory to allow combining. */
19302 if (MEM_P (operand) && MEM_READONLY_P (operand))
19304 rtx tmp = maybe_get_pool_constant (operand);
19309 if (MEM_P (operand) && !offsettable_memref_p (operand))
19311 /* The only non-offsettable memories we handle are pushes. */
19312 int ok = push_operand (operand, VOIDmode);
19316 operand = copy_rtx (operand);
19317 PUT_MODE (operand, Pmode);
19318 parts[0] = parts[1] = parts[2] = parts[3] = operand;
19322 if (GET_CODE (operand) == CONST_VECTOR)
19324 enum machine_mode imode = int_mode_for_mode (mode);
19325 /* Caution: if we looked through a constant pool memory above,
19326 the operand may actually have a different mode now. That's
19327 ok, since we want to pun this all the way back to an integer. */
19328 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
19329 gcc_assert (operand != NULL);
19335 if (mode == DImode)
19336 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
19341 if (REG_P (operand))
19343 gcc_assert (reload_completed);
19344 for (i = 0; i < size; i++)
19345 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
19347 else if (offsettable_memref_p (operand))
19349 operand = adjust_address (operand, SImode, 0);
19350 parts[0] = operand;
19351 for (i = 1; i < size; i++)
19352 parts[i] = adjust_address (operand, SImode, 4 * i);
19354 else if (GET_CODE (operand) == CONST_DOUBLE)
19359 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
19363 real_to_target (l, &r, mode);
19364 parts[3] = gen_int_mode (l[3], SImode);
19365 parts[2] = gen_int_mode (l[2], SImode);
19368 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
19369 parts[2] = gen_int_mode (l[2], SImode);
19372 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
19375 gcc_unreachable ();
19377 parts[1] = gen_int_mode (l[1], SImode);
19378 parts[0] = gen_int_mode (l[0], SImode);
19381 gcc_unreachable ();
19386 if (mode == TImode)
19387 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
19388 if (mode == XFmode || mode == TFmode)
19390 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
19391 if (REG_P (operand))
19393 gcc_assert (reload_completed);
19394 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
19395 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
19397 else if (offsettable_memref_p (operand))
19399 operand = adjust_address (operand, DImode, 0);
19400 parts[0] = operand;
19401 parts[1] = adjust_address (operand, upper_mode, 8);
19403 else if (GET_CODE (operand) == CONST_DOUBLE)
19408 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
19409 real_to_target (l, &r, mode);
19411 /* Do not use shift by 32 to avoid warning on 32bit systems. */
19412 if (HOST_BITS_PER_WIDE_INT >= 64)
19415 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
19416 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
19419 parts[0] = immed_double_const (l[0], l[1], DImode);
19421 if (upper_mode == SImode)
19422 parts[1] = gen_int_mode (l[2], SImode);
19423 else if (HOST_BITS_PER_WIDE_INT >= 64)
19426 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
19427 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
19430 parts[1] = immed_double_const (l[2], l[3], DImode);
19433 gcc_unreachable ();
19440 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
19441 Return false when normal moves are needed; true when all required
19442 insns have been emitted. Operands 2-4 contain the input values
19443 in the correct order; operands 5-7 contain the output values. */
19446 ix86_split_long_move (rtx operands[])
19451 int collisions = 0;
19452 enum machine_mode mode = GET_MODE (operands[0]);
19453 bool collisionparts[4];
19455 /* The DFmode expanders may ask us to move double.
19456 For a 64-bit target this is a single move. By hiding the fact
19457 here we simplify the i386.md splitters. */
19458 if (TARGET_64BIT && GET_MODE_SIZE (GET_MODE (operands[0])) == 8)
19460 /* Optimize constant pool reference to immediates. This is used by
19461 fp moves, which force all constants to memory to allow combining. */
19463 if (MEM_P (operands[1])
19464 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
19465 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
19466 operands[1] = get_pool_constant (XEXP (operands[1], 0));
19467 if (push_operand (operands[0], VOIDmode))
19469 operands[0] = copy_rtx (operands[0]);
19470 PUT_MODE (operands[0], Pmode);
19473 operands[0] = gen_lowpart (DImode, operands[0]);
19474 operands[1] = gen_lowpart (DImode, operands[1]);
19475 emit_move_insn (operands[0], operands[1]);
19479 /* The only non-offsettable memory we handle is push. */
19480 if (push_operand (operands[0], VOIDmode))
19483 gcc_assert (!MEM_P (operands[0])
19484 || offsettable_memref_p (operands[0]));
19486 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
19487 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
19489 /* When emitting a push, take care of source operands on the stack. */
19490 if (push && MEM_P (operands[1])
19491 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
19493 rtx src_base = XEXP (part[1][nparts - 1], 0);
19495 /* Compensate for the stack decrement by 4. */
19496 if (!TARGET_64BIT && nparts == 3
19497 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
19498 src_base = plus_constant (src_base, 4);
19500 /* src_base refers to the stack pointer and is
19501 automatically decreased by the emitted push. */
19502 for (i = 0; i < nparts; i++)
19503 part[1][i] = change_address (part[1][i],
19504 GET_MODE (part[1][i]), src_base);
19507 /* We need to do the copy in the right order in case an address register
19508 of the source overlaps the destination. */
19509 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
19513 for (i = 0; i < nparts; i++)
19516 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
19517 if (collisionparts[i])
19521 /* Collision in the middle part can be handled by reordering. */
19522 if (collisions == 1 && nparts == 3 && collisionparts [1])
19524 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
19525 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
19527 else if (collisions == 1
19529 && (collisionparts [1] || collisionparts [2]))
19531 if (collisionparts [1])
19533 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
19534 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
19538 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
19539 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
19543 /* If there are more collisions, we can't handle it by reordering.
19544 Do an lea to the last part and use only one colliding move. */
19545 else if (collisions > 1)
19551 base = part[0][nparts - 1];
19553 /* Handle the case when the last part isn't valid for lea.
19554 Happens in 64-bit mode storing the 12-byte XFmode. */
19555 if (GET_MODE (base) != Pmode)
19556 base = gen_rtx_REG (Pmode, REGNO (base));
19558 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
19559 part[1][0] = replace_equiv_address (part[1][0], base);
19560 for (i = 1; i < nparts; i++)
19562 tmp = plus_constant (base, UNITS_PER_WORD * i);
19563 part[1][i] = replace_equiv_address (part[1][i], tmp);
19574 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
19575 emit_insn (gen_addsi3 (stack_pointer_rtx,
19576 stack_pointer_rtx, GEN_INT (-4)));
19577 emit_move_insn (part[0][2], part[1][2]);
19579 else if (nparts == 4)
19581 emit_move_insn (part[0][3], part[1][3]);
19582 emit_move_insn (part[0][2], part[1][2]);
19587 /* In 64-bit mode we don't have a 32-bit push available. In case this is
19588 a register, it is OK; we will just use the larger counterpart. We also
19589 retype memory; this comes from an attempt to avoid the REX prefix on
19590 moving the second half of a TFmode value. */
19591 if (GET_MODE (part[1][1]) == SImode)
19593 switch (GET_CODE (part[1][1]))
19596 part[1][1] = adjust_address (part[1][1], DImode, 0);
19600 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
19604 gcc_unreachable ();
19607 if (GET_MODE (part[1][0]) == SImode)
19608 part[1][0] = part[1][1];
19611 emit_move_insn (part[0][1], part[1][1]);
19612 emit_move_insn (part[0][0], part[1][0]);
19616 /* Choose the correct order so we do not overwrite the source before it is copied. */
19617 if ((REG_P (part[0][0])
19618 && REG_P (part[1][1])
19619 && (REGNO (part[0][0]) == REGNO (part[1][1])
19621 && REGNO (part[0][0]) == REGNO (part[1][2]))
19623 && REGNO (part[0][0]) == REGNO (part[1][3]))))
19625 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
19627 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
19629 operands[2 + i] = part[0][j];
19630 operands[6 + i] = part[1][j];
19635 for (i = 0; i < nparts; i++)
19637 operands[2 + i] = part[0][i];
19638 operands[6 + i] = part[1][i];
19642 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
19643 if (optimize_insn_for_size_p ())
19645 for (j = 0; j < nparts - 1; j++)
19646 if (CONST_INT_P (operands[6 + j])
19647 && operands[6 + j] != const0_rtx
19648 && REG_P (operands[2 + j]))
19649 for (i = j; i < nparts - 1; i++)
19650 if (CONST_INT_P (operands[7 + i])
19651 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
19652 operands[7 + i] = operands[2 + j];
19655 for (i = 0; i < nparts; i++)
19656 emit_move_insn (operands[2 + i], operands[6 + i]);
19661 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
19662 left shift by a constant, either using a single shift or
19663 a sequence of add instructions. */
19666 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
19668 rtx (*insn)(rtx, rtx, rtx);
19671 || (count * ix86_cost->add <= ix86_cost->shift_const
19672 && !optimize_insn_for_size_p ()))
19674 insn = mode == DImode ? gen_addsi3 : gen_adddi3;
19675 while (count-- > 0)
19676 emit_insn (insn (operand, operand, operand));
19680 insn = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
19681 emit_insn (insn (operand, operand, GEN_INT (count)));
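/* [Editorial sketch, not from GCC] The add sequence above exploits
   x + x == x << 1, so COUNT self-additions shift left by COUNT:  */
static unsigned int
example_shl_by_adds (unsigned int x, int count)
{
  while (count-- > 0)
    x += x;            /* one addl per bit of shift */
  return x;
}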
19686 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
19688 rtx (*gen_ashl3)(rtx, rtx, rtx);
19689 rtx (*gen_shld)(rtx, rtx, rtx);
19690 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19692 rtx low[2], high[2];
19695 if (CONST_INT_P (operands[2]))
19697 split_double_mode (mode, operands, 2, low, high);
19698 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19700 if (count >= half_width)
19702 emit_move_insn (high[0], low[1]);
19703 emit_move_insn (low[0], const0_rtx);
19705 if (count > half_width)
19706 ix86_expand_ashl_const (high[0], count - half_width, mode);
19710 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
19712 if (!rtx_equal_p (operands[0], operands[1]))
19713 emit_move_insn (operands[0], operands[1]);
19715 emit_insn (gen_shld (high[0], low[0], GEN_INT (count)));
19716 ix86_expand_ashl_const (low[0], count, mode);
19721 split_double_mode (mode, operands, 1, low, high);
19723 gen_ashl3 = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
19725 if (operands[1] == const1_rtx)
19727 /* Assuming we've chosen QImode-capable registers, then 1 << N
19728 can be done with two 32/64-bit shifts, no branches, no cmoves. */
19729 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
19731 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
19733 ix86_expand_clear (low[0]);
19734 ix86_expand_clear (high[0]);
19735 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (half_width)));
19737 d = gen_lowpart (QImode, low[0]);
19738 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
19739 s = gen_rtx_EQ (QImode, flags, const0_rtx);
19740 emit_insn (gen_rtx_SET (VOIDmode, d, s));
19742 d = gen_lowpart (QImode, high[0]);
19743 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
19744 s = gen_rtx_NE (QImode, flags, const0_rtx);
19745 emit_insn (gen_rtx_SET (VOIDmode, d, s));
19748 /* Otherwise, we can get the same results by manually performing
19749 a bit extract operation on bit 5/6, and then performing the two
19750 shifts. The two methods of getting 0/1 into low/high are exactly
19751 the same size. Avoiding the shift in the bit extract case helps
19752 pentium4 a bit; no one else seems to care much either way. */
19755 enum machine_mode half_mode;
19756 rtx (*gen_lshr3)(rtx, rtx, rtx);
19757 rtx (*gen_and3)(rtx, rtx, rtx);
19758 rtx (*gen_xor3)(rtx, rtx, rtx);
19759 HOST_WIDE_INT bits;
19762 if (mode == DImode)
19764 half_mode = SImode;
19765 gen_lshr3 = gen_lshrsi3;
19766 gen_and3 = gen_andsi3;
19767 gen_xor3 = gen_xorsi3;
19772 half_mode = DImode;
19773 gen_lshr3 = gen_lshrdi3;
19774 gen_and3 = gen_anddi3;
19775 gen_xor3 = gen_xordi3;
19779 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
19780 x = gen_rtx_ZERO_EXTEND (half_mode, operands[2]);
19782 x = gen_lowpart (half_mode, operands[2]);
19783 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
19785 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (bits)));
19786 emit_insn (gen_and3 (high[0], high[0], const1_rtx));
19787 emit_move_insn (low[0], high[0]);
19788 emit_insn (gen_xor3 (low[0], low[0], const1_rtx));
19791 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
19792 emit_insn (gen_ashl3 (high[0], high[0], operands[2]));
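/* [Editorial sketch, not from GCC] The branchless 1 << N built above
   for a split double-word register pair, 0 <= n < 64 with 32-bit
   halves; x86 shifts already mask their count to 5 bits:  */
static void
example_one_shl_n (unsigned int n, unsigned int *lo, unsigned int *hi)
{
  *lo = (n & 32) == 0;     /* setcc from testing bit 5 of the count */
  *hi = (n & 32) != 0;
  *lo <<= (n & 31);        /* both halves shifted by n mod 32 */
  *hi <<= (n & 31);
}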
19796 if (operands[1] == constm1_rtx)
19798 /* For -1 << N, we can avoid the shld instruction, because we
19799 know that we're shifting 0...31/63 ones into a -1. */
19800 emit_move_insn (low[0], constm1_rtx);
19801 if (optimize_insn_for_size_p ())
19802 emit_move_insn (high[0], low[0]);
19804 emit_move_insn (high[0], constm1_rtx);
19808 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
19810 if (!rtx_equal_p (operands[0], operands[1]))
19811 emit_move_insn (operands[0], operands[1]);
19813 split_double_mode (mode, operands, 1, low, high);
19814 emit_insn (gen_shld (high[0], low[0], operands[2]));
19817 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
19819 if (TARGET_CMOVE && scratch)
19821 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19822 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19824 ix86_expand_clear (scratch);
19825 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
19829 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
19830 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
19832 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
19837 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
19839 rtx (*gen_ashr3)(rtx, rtx, rtx)
19840 = mode == DImode ? gen_ashrsi3 : gen_ashrdi3;
19841 rtx (*gen_shrd)(rtx, rtx, rtx);
19842 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19844 rtx low[2], high[2];
19847 if (CONST_INT_P (operands[2]))
19849 split_double_mode (mode, operands, 2, low, high);
19850 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19852 if (count == GET_MODE_BITSIZE (mode) - 1)
19854 emit_move_insn (high[0], high[1]);
19855 emit_insn (gen_ashr3 (high[0], high[0],
19856 GEN_INT (half_width - 1)));
19857 emit_move_insn (low[0], high[0]);
19860 else if (count >= half_width)
19862 emit_move_insn (low[0], high[1]);
19863 emit_move_insn (high[0], low[0]);
19864 emit_insn (gen_ashr3 (high[0], high[0],
19865 GEN_INT (half_width - 1)));
19867 if (count > half_width)
19868 emit_insn (gen_ashr3 (low[0], low[0],
19869 GEN_INT (count - half_width)));
19873 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19875 if (!rtx_equal_p (operands[0], operands[1]))
19876 emit_move_insn (operands[0], operands[1]);
19878 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
19879 emit_insn (gen_ashr3 (high[0], high[0], GEN_INT (count)));
19884 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19886 if (!rtx_equal_p (operands[0], operands[1]))
19887 emit_move_insn (operands[0], operands[1]);
19889 split_double_mode (mode, operands, 1, low, high);
19891 emit_insn (gen_shrd (low[0], high[0], operands[2]));
19892 emit_insn (gen_ashr3 (high[0], high[0], operands[2]));
19894 if (TARGET_CMOVE && scratch)
19896 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19897 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19899 emit_move_insn (scratch, high[0]);
19900 emit_insn (gen_ashr3 (scratch, scratch,
19901 GEN_INT (half_width - 1)));
19902 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
19907 rtx (*gen_x86_shift_adj_3)(rtx, rtx, rtx)
19908 = mode == DImode ? gen_x86_shiftsi_adj_3 : gen_x86_shiftdi_adj_3;
19910 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
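/* [Editorial sketch, not from GCC] The shrd-based double-word
   arithmetic right shift above, for a count 0 < c < 32 (the adj
   patterns fix up counts of 32 or more at run time):  */
static void
example_double_shr (unsigned int *lo, unsigned int *hi, unsigned int c)
{
  *lo = (*lo >> c) | (*hi << (32 - c));    /* shrdl %cl, hi, lo */
  *hi = (unsigned int) ((int) *hi >> c);   /* sarl  %cl, hi     */
}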
19916 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
19918 rtx (*gen_lshr3)(rtx, rtx, rtx)
19919 = mode == DImode ? gen_lshrsi3 : gen_lshrdi3;
19920 rtx (*gen_shrd)(rtx, rtx, rtx);
19921 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19923 rtx low[2], high[2];
19926 if (CONST_INT_P (operands[2]))
19928 split_double_mode (mode, operands, 2, low, high);
19929 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19931 if (count >= half_width)
19933 emit_move_insn (low[0], high[1]);
19934 ix86_expand_clear (high[0]);
19936 if (count > half_width)
19937 emit_insn (gen_lshr3 (low[0], low[0],
19938 GEN_INT (count - half_width)));
19942 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19944 if (!rtx_equal_p (operands[0], operands[1]))
19945 emit_move_insn (operands[0], operands[1]);
19947 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
19948 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (count)));
19953 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19955 if (!rtx_equal_p (operands[0], operands[1]))
19956 emit_move_insn (operands[0], operands[1]);
19958 split_double_mode (mode, operands, 1, low, high);
19960 emit_insn (gen_shrd (low[0], high[0], operands[2]));
19961 emit_insn (gen_lshr3 (high[0], high[0], operands[2]));
19963 if (TARGET_CMOVE && scratch)
19965 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19966 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19968 ix86_expand_clear (scratch);
19969 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
19974 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
19975 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
19977 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
19982 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
19984 predict_jump (int prob)
19986 rtx insn = get_last_insn ();
19987 gcc_assert (JUMP_P (insn));
19988 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
19991 /* Helper function for the string operations below. Test whether VARIABLE
19992 is aligned to VALUE bytes. If so, jump to the label. */
19994 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
19996 rtx label = gen_label_rtx ();
19997 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
19998 if (GET_MODE (variable) == DImode)
19999 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
20001 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
20002 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
20005 predict_jump (REG_BR_PROB_BASE * 50 / 100);
20007 predict_jump (REG_BR_PROB_BASE * 90 / 100);
20011 /* Adjust COUNTER by the VALUE. */
20013 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
20015 rtx (*gen_add)(rtx, rtx, rtx)
20016 = GET_MODE (countreg) == DImode ? gen_adddi3 : gen_addsi3;
20018 emit_insn (gen_add (countreg, countreg, GEN_INT (-value)));
20021 /* Zero-extend the possibly-SImode EXP to a Pmode register. */
20023 ix86_zero_extend_to_Pmode (rtx exp)
20026 if (GET_MODE (exp) == VOIDmode)
20027 return force_reg (Pmode, exp);
20028 if (GET_MODE (exp) == Pmode)
20029 return copy_to_mode_reg (Pmode, exp);
20030 r = gen_reg_rtx (Pmode);
20031 emit_insn (gen_zero_extendsidi2 (r, exp));
20035 /* Divide COUNTREG by SCALE. */
20037 scale_counter (rtx countreg, int scale)
20043 if (CONST_INT_P (countreg))
20044 return GEN_INT (INTVAL (countreg) / scale);
20045 gcc_assert (REG_P (countreg));
20047 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
20048 GEN_INT (exact_log2 (scale)),
20049 NULL, 1, OPTAB_DIRECT);
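/* Since SCALE is a power of two, the division is a logical shift right:
   e.g. a count of 37 bytes with scale = 4 yields 37 >> 2 = 9 chunks,
   and the remaining byte is left for the epilogue code.  */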
20053 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
20054 DImode for constant loop counts. */
20056 static enum machine_mode
20057 counter_mode (rtx count_exp)
20059 if (GET_MODE (count_exp) != VOIDmode)
20060 return GET_MODE (count_exp);
20061 if (!CONST_INT_P (count_exp))
20063 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
20068 /* When SRCPTR is non-NULL, output a simple loop that moves COUNT bytes
20069    of memory from SRCPTR to DESTPTR in chunks of MODE, unrolled
20070    UNROLL times.  When SRCPTR is NULL, output the equivalent loop that
20071    sets the memory to VALUE (which is expected to be in MODE).
20073    The size is rounded down to a whole number of chunks moved at once.
20074    SRCMEM and DESTMEM provide MEM rtxes to supply proper aliasing info.  */
20078 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
20079 rtx destptr, rtx srcptr, rtx value,
20080 rtx count, enum machine_mode mode, int unroll,
20083 rtx out_label, top_label, iter, tmp;
20084 enum machine_mode iter_mode = counter_mode (count);
20085 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
20086 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
20092 top_label = gen_label_rtx ();
20093 out_label = gen_label_rtx ();
20094 iter = gen_reg_rtx (iter_mode);
20096 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
20097 NULL, 1, OPTAB_DIRECT);
20098 /* Those two should combine. */
20099 if (piece_size == const1_rtx)
20101 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
20103 predict_jump (REG_BR_PROB_BASE * 10 / 100);
20105 emit_move_insn (iter, const0_rtx);
20107 emit_label (top_label);
20109 tmp = convert_modes (Pmode, iter_mode, iter, true);
20110 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
20111 destmem = change_address (destmem, mode, x_addr);
20115 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
20116 srcmem = change_address (srcmem, mode, y_addr);
20118 /* When unrolling for chips that reorder memory reads and writes,
20119    we can save registers by using a single temporary.
20120    Also, using 4 temporaries is overkill in 32bit mode.  */
20121 if (!TARGET_64BIT && 0)
20123 for (i = 0; i < unroll; i++)
20128 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20130 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
20132 emit_move_insn (destmem, srcmem);
20138 gcc_assert (unroll <= 4);
20139 for (i = 0; i < unroll; i++)
20141 tmpreg[i] = gen_reg_rtx (mode);
20145 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
20147 emit_move_insn (tmpreg[i], srcmem);
20149 for (i = 0; i < unroll; i++)
20154 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20156 emit_move_insn (destmem, tmpreg[i]);
20161 for (i = 0; i < unroll; i++)
20165 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20166 emit_move_insn (destmem, value);
20169 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
20170 true, OPTAB_LIB_WIDEN);
20172 emit_move_insn (iter, tmp);
20174 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
20176 if (expected_size != -1)
20178 expected_size /= GET_MODE_SIZE (mode) * unroll;
20179 if (expected_size == 0)
20181 else if (expected_size > REG_BR_PROB_BASE)
20182 predict_jump (REG_BR_PROB_BASE - 1);
20184 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
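/* Worked example of the back-edge probability computed above: with
   expected_size of 10 chunks this is 10000 - (10000 + 5) / 10 = 9000,
   i.e. the loop branch is predicted taken 90% of the time,
   approximating 1 - 1/n for an n-iteration loop with rounding.  */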
20187 predict_jump (REG_BR_PROB_BASE * 80 / 100);
20188 iter = ix86_zero_extend_to_Pmode (iter);
20189 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
20190 true, OPTAB_LIB_WIDEN);
20191 if (tmp != destptr)
20192 emit_move_insn (destptr, tmp);
20195 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
20196 true, OPTAB_LIB_WIDEN);
20198 emit_move_insn (srcptr, tmp);
20200 emit_label (out_label);
20203 /* Output a "rep; mov" instruction.
20204    Arguments have the same meaning as for the previous function.  */
20206 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
20207 rtx destptr, rtx srcptr,
20209 enum machine_mode mode)
20215 /* If the size is known, it is shorter to use rep movs. */
20216 if (mode == QImode && CONST_INT_P (count)
20217 && !(INTVAL (count) & 3))
20220 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
20221 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
20222 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
20223 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
20224 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
20225 if (mode != QImode)
20227 destexp = gen_rtx_ASHIFT (Pmode, countreg,
20228 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20229 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
20230 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
20231 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20232 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
20236 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
20237 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
20239 if (CONST_INT_P (count))
20241 count = GEN_INT (INTVAL (count)
20242 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
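/* For example, a constant count of 37 with DImode chunks records a
   known access size of 32 bytes here; the remaining bytes are left to
   the epilogue code.  */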
20243 destmem = shallow_copy_rtx (destmem);
20244 srcmem = shallow_copy_rtx (srcmem);
20245 set_mem_size (destmem, count);
20246 set_mem_size (srcmem, count);
20250 if (MEM_SIZE (destmem))
20251 set_mem_size (destmem, NULL_RTX);
20252 if (MEM_SIZE (srcmem))
20253 set_mem_size (srcmem, NULL_RTX);
20255 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
20259 /* Output a "rep; stos" instruction.
20260    Arguments have the same meaning as for the previous function.  */
20262 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
20263 rtx count, enum machine_mode mode,
20269 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
20270 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
20271 value = force_reg (mode, gen_lowpart (mode, value));
20272 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
20273 if (mode != QImode)
20275 destexp = gen_rtx_ASHIFT (Pmode, countreg,
20276 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
20277 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
20280 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
20281 if (orig_value == const0_rtx && CONST_INT_P (count))
20283 count = GEN_INT (INTVAL (count)
20284 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
20285 destmem = shallow_copy_rtx (destmem);
20286 set_mem_size (destmem, count);
20288 else if (MEM_SIZE (destmem))
20289 set_mem_size (destmem, NULL_RTX);
20290 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
20294 emit_strmov (rtx destmem, rtx srcmem,
20295 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
20297 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
20298 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
20299 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20302 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
20304 expand_movmem_epilogue (rtx destmem, rtx srcmem,
20305 rtx destptr, rtx srcptr, rtx count, int max_size)
20308 if (CONST_INT_P (count))
20310 HOST_WIDE_INT countval = INTVAL (count);
20313 if ((countval & 0x10) && max_size > 16)
20317 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
20318 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
20321 gcc_unreachable ();
20324 if ((countval & 0x08) && max_size > 8)
20327 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
20330 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
20331 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
20335 if ((countval & 0x04) && max_size > 4)
20337 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
20340 if ((countval & 0x02) && max_size > 2)
20342 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
20345 if ((countval & 0x01) && max_size > 1)
20347 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
20354 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
20355 count, 1, OPTAB_DIRECT);
20356 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
20357 count, QImode, 1, 4);
20361 /* When single-instruction stringops are available, we can cheaply advance
20362    the dest and src pointers.  Otherwise we save code size by maintaining
20363    an offset (zero is readily available from the preceding rep operation)
   and using x86 addressing modes.  */
20365 if (TARGET_SINGLE_STRINGOP)
20369 rtx label = ix86_expand_aligntest (count, 4, true);
20370 src = change_address (srcmem, SImode, srcptr);
20371 dest = change_address (destmem, SImode, destptr);
20372 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20373 emit_label (label);
20374 LABEL_NUSES (label) = 1;
20378 rtx label = ix86_expand_aligntest (count, 2, true);
20379 src = change_address (srcmem, HImode, srcptr);
20380 dest = change_address (destmem, HImode, destptr);
20381 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20382 emit_label (label);
20383 LABEL_NUSES (label) = 1;
20387 rtx label = ix86_expand_aligntest (count, 1, true);
20388 src = change_address (srcmem, QImode, srcptr);
20389 dest = change_address (destmem, QImode, destptr);
20390 emit_insn (gen_strmov (destptr, dest, srcptr, src));
20391 emit_label (label);
20392 LABEL_NUSES (label) = 1;
20397 rtx offset = force_reg (Pmode, const0_rtx);
20402 rtx label = ix86_expand_aligntest (count, 4, true);
20403 src = change_address (srcmem, SImode, srcptr);
20404 dest = change_address (destmem, SImode, destptr);
20405 emit_move_insn (dest, src);
20406 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
20407 true, OPTAB_LIB_WIDEN);
20409 emit_move_insn (offset, tmp);
20410 emit_label (label);
20411 LABEL_NUSES (label) = 1;
20415 rtx label = ix86_expand_aligntest (count, 2, true);
20416 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20417 src = change_address (srcmem, HImode, tmp);
20418 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20419 dest = change_address (destmem, HImode, tmp);
20420 emit_move_insn (dest, src);
20421 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
20422 true, OPTAB_LIB_WIDEN);
20424 emit_move_insn (offset, tmp);
20425 emit_label (label);
20426 LABEL_NUSES (label) = 1;
20430 rtx label = ix86_expand_aligntest (count, 1, true);
20431 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20432 src = change_address (srcmem, QImode, tmp);
20433 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20434 dest = change_address (destmem, QImode, tmp);
20435 emit_move_insn (dest, src);
20436 emit_label (label);
20437 LABEL_NUSES (label) = 1;
20442 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
20444 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
20445 rtx count, int max_size)
20448 expand_simple_binop (counter_mode (count), AND, count,
20449 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
20450 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
20451 gen_lowpart (QImode, value), count, QImode,
20455 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
20457 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
20461 if (CONST_INT_P (count))
20463 HOST_WIDE_INT countval = INTVAL (count);
20466 if ((countval & 0x10) && max_size > 16)
20470 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20471 emit_insn (gen_strset (destptr, dest, value));
20472 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
20473 emit_insn (gen_strset (destptr, dest, value));
20476 gcc_unreachable ();
20479 if ((countval & 0x08) && max_size > 8)
20483 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20484 emit_insn (gen_strset (destptr, dest, value));
20488 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20489 emit_insn (gen_strset (destptr, dest, value));
20490 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
20491 emit_insn (gen_strset (destptr, dest, value));
20495 if ((countval & 0x04) && max_size > 4)
20497 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20498 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20501 if ((countval & 0x02) && max_size > 2)
20503 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
20504 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20507 if ((countval & 0x01) && max_size > 1)
20509 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
20510 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
20517 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
20522 rtx label = ix86_expand_aligntest (count, 16, true);
20525 dest = change_address (destmem, DImode, destptr);
20526 emit_insn (gen_strset (destptr, dest, value));
20527 emit_insn (gen_strset (destptr, dest, value));
20531 dest = change_address (destmem, SImode, destptr);
20532 emit_insn (gen_strset (destptr, dest, value));
20533 emit_insn (gen_strset (destptr, dest, value));
20534 emit_insn (gen_strset (destptr, dest, value));
20535 emit_insn (gen_strset (destptr, dest, value));
20537 emit_label (label);
20538 LABEL_NUSES (label) = 1;
20542 rtx label = ix86_expand_aligntest (count, 8, true);
20545 dest = change_address (destmem, DImode, destptr);
20546 emit_insn (gen_strset (destptr, dest, value));
20550 dest = change_address (destmem, SImode, destptr);
20551 emit_insn (gen_strset (destptr, dest, value));
20552 emit_insn (gen_strset (destptr, dest, value));
20554 emit_label (label);
20555 LABEL_NUSES (label) = 1;
20559 rtx label = ix86_expand_aligntest (count, 4, true);
20560 dest = change_address (destmem, SImode, destptr);
20561 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20562 emit_label (label);
20563 LABEL_NUSES (label) = 1;
20567 rtx label = ix86_expand_aligntest (count, 2, true);
20568 dest = change_address (destmem, HImode, destptr);
20569 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20570 emit_label (label);
20571 LABEL_NUSES (label) = 1;
20575 rtx label = ix86_expand_aligntest (count, 1, true);
20576 dest = change_address (destmem, QImode, destptr);
20577 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
20578 emit_label (label);
20579 LABEL_NUSES (label) = 1;
20583 /* Copy enough bytes from SRC to DEST to align DEST, known to be aligned
20584    to ALIGN, up to DESIRED_ALIGNMENT.  */
20586 expand_movmem_prologue (rtx destmem, rtx srcmem,
20587 rtx destptr, rtx srcptr, rtx count,
20588 int align, int desired_alignment)
20590 if (align <= 1 && desired_alignment > 1)
20592 rtx label = ix86_expand_aligntest (destptr, 1, false);
20593 srcmem = change_address (srcmem, QImode, srcptr);
20594 destmem = change_address (destmem, QImode, destptr);
20595 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20596 ix86_adjust_counter (count, 1);
20597 emit_label (label);
20598 LABEL_NUSES (label) = 1;
20600 if (align <= 2 && desired_alignment > 2)
20602 rtx label = ix86_expand_aligntest (destptr, 2, false);
20603 srcmem = change_address (srcmem, HImode, srcptr);
20604 destmem = change_address (destmem, HImode, destptr);
20605 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20606 ix86_adjust_counter (count, 2);
20607 emit_label (label);
20608 LABEL_NUSES (label) = 1;
20610 if (align <= 4 && desired_alignment > 4)
20612 rtx label = ix86_expand_aligntest (destptr, 4, false);
20613 srcmem = change_address (srcmem, SImode, srcptr);
20614 destmem = change_address (destmem, SImode, destptr);
20615 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20616 ix86_adjust_counter (count, 4);
20617 emit_label (label);
20618 LABEL_NUSES (label) = 1;
20620 gcc_assert (desired_alignment <= 8);
20623 /* Copy enough bytes from SRC to DST to align DST to DESIRED_ALIGN.
20624    ALIGN_BYTES is how many bytes need to be copied.  */
20626 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
20627 int desired_align, int align_bytes)
20630 rtx src_size, dst_size;
20632 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
20633 if (src_align_bytes >= 0)
20634 src_align_bytes = desired_align - src_align_bytes;
20635 src_size = MEM_SIZE (src);
20636 dst_size = MEM_SIZE (dst);
20637 if (align_bytes & 1)
20639 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20640 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
20642 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20644 if (align_bytes & 2)
20646 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20647 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
20648 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20649 set_mem_align (dst, 2 * BITS_PER_UNIT);
20650 if (src_align_bytes >= 0
20651 && (src_align_bytes & 1) == (align_bytes & 1)
20652 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
20653 set_mem_align (src, 2 * BITS_PER_UNIT);
20655 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20657 if (align_bytes & 4)
20659 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20660 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
20661 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20662 set_mem_align (dst, 4 * BITS_PER_UNIT);
20663 if (src_align_bytes >= 0)
20665 unsigned int src_align = 0;
20666 if ((src_align_bytes & 3) == (align_bytes & 3))
20668 else if ((src_align_bytes & 1) == (align_bytes & 1))
20670 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20671 set_mem_align (src, src_align * BITS_PER_UNIT);
20674 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20676 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20677 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
20678 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20679 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20680 if (src_align_bytes >= 0)
20682 unsigned int src_align = 0;
20683 if ((src_align_bytes & 7) == (align_bytes & 7))
20685 else if ((src_align_bytes & 3) == (align_bytes & 3))
20687 else if ((src_align_bytes & 1) == (align_bytes & 1))
20689 if (src_align > (unsigned int) desired_align)
20690 src_align = desired_align;
20691 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20692 set_mem_align (src, src_align * BITS_PER_UNIT);
20695 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
20697 set_mem_size (dst, GEN_INT (INTVAL (src_size) - align_bytes));
20702 /* Store enough bytes to DEST to align DEST, known to be aligned
20703    to ALIGN, up to DESIRED_ALIGNMENT.  */
20705 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
20706 int align, int desired_alignment)
20708 if (align <= 1 && desired_alignment > 1)
20710 rtx label = ix86_expand_aligntest (destptr, 1, false);
20711 destmem = change_address (destmem, QImode, destptr);
20712 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
20713 ix86_adjust_counter (count, 1);
20714 emit_label (label);
20715 LABEL_NUSES (label) = 1;
20717 if (align <= 2 && desired_alignment > 2)
20719 rtx label = ix86_expand_aligntest (destptr, 2, false);
20720 destmem = change_address (destmem, HImode, destptr);
20721 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
20722 ix86_adjust_counter (count, 2);
20723 emit_label (label);
20724 LABEL_NUSES (label) = 1;
20726 if (align <= 4 && desired_alignment > 4)
20728 rtx label = ix86_expand_aligntest (destptr, 4, false);
20729 destmem = change_address (destmem, SImode, destptr);
20730 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
20731 ix86_adjust_counter (count, 4);
20732 emit_label (label);
20733 LABEL_NUSES (label) = 1;
20735 gcc_assert (desired_alignment <= 8);
20738 /* Store enough bytes to DST to align DST to DESIRED_ALIGN.
20739    ALIGN_BYTES is how many bytes need to be stored.  */
20741 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
20742 int desired_align, int align_bytes)
20745 rtx dst_size = MEM_SIZE (dst);
20746 if (align_bytes & 1)
20748 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20750 emit_insn (gen_strset (destreg, dst,
20751 gen_lowpart (QImode, value)));
20753 if (align_bytes & 2)
20755 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20756 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20757 set_mem_align (dst, 2 * BITS_PER_UNIT);
20759 emit_insn (gen_strset (destreg, dst,
20760 gen_lowpart (HImode, value)));
20762 if (align_bytes & 4)
20764 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20765 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20766 set_mem_align (dst, 4 * BITS_PER_UNIT);
20768 emit_insn (gen_strset (destreg, dst,
20769 gen_lowpart (SImode, value)));
20771 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20772 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20773 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20775 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
20779 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
20780 static enum stringop_alg
20781 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
20782 int *dynamic_check)
20784 const struct stringop_algs * algs;
20785 bool optimize_for_speed;
20786 /* Algorithms using the rep prefix want at least edi and ecx;
20787 additionally, memset wants eax and memcpy wants esi. Don't
20788 consider such algorithms if the user has appropriated those
20789 registers for their own purposes. */
20790 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
20792 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
20794 #define ALG_USABLE_P(alg) (rep_prefix_usable \
20795 || (alg != rep_prefix_1_byte \
20796 && alg != rep_prefix_4_byte \
20797 && alg != rep_prefix_8_byte))
20798 const struct processor_costs *cost;
20800 /* Even if the string operation call is cold, we still might spend a lot
20801 of time processing large blocks. */
20802 if (optimize_function_for_size_p (cfun)
20803 || (optimize_insn_for_size_p ()
20804 && expected_size != -1 && expected_size < 256))
20805 optimize_for_speed = false;
20807 optimize_for_speed = true;
20809 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
20811 *dynamic_check = -1;
20813 algs = &cost->memset[TARGET_64BIT != 0];
20815 algs = &cost->memcpy[TARGET_64BIT != 0];
20816 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
20817 return stringop_alg;
20818 /* rep; movq or rep; movl is the smallest variant. */
20819 else if (!optimize_for_speed)
20821 if (!count || (count & 3))
20822 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
20824 return rep_prefix_usable ? rep_prefix_4_byte : loop;
20826 /* Very tiny blocks are best handled via the loop; REP is expensive to
   set up.  */
20828 else if (expected_size != -1 && expected_size < 4)
20829 return loop_1_byte;
20830 else if (expected_size != -1)
20833 enum stringop_alg alg = libcall;
20834 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20836 /* We get here if the algorithms that were not libcall-based
20837 were rep-prefix based and we are unable to use rep prefixes
20838 based on global register usage. Break out of the loop and
20839 use the heuristic below. */
20840 if (algs->size[i].max == 0)
20842 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
20844 enum stringop_alg candidate = algs->size[i].alg;
20846 if (candidate != libcall && ALG_USABLE_P (candidate))
20848 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
20849 last non-libcall inline algorithm. */
20850 if (TARGET_INLINE_ALL_STRINGOPS)
20852 /* When the current size is best to be copied by a libcall,
20853 but we are still forced to inline, run the heuristic below
20854 that will pick code for medium sized blocks. */
20855 if (alg != libcall)
20859 else if (ALG_USABLE_P (candidate))
20863 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
20865 /* When asked to inline the call anyway, try to pick a meaningful choice.
20866    We look for the maximal size of a block that is faster to copy by hand
20867    and take blocks of at most that size, guessing that the average size
20868    will be roughly half of the block.
20870    If this turns out to be bad, we might simply specify the preferred
20871    choice in ix86_costs.  */
20872 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20873 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
20876 enum stringop_alg alg;
20878 bool any_alg_usable_p = true;
20880 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20882 enum stringop_alg candidate = algs->size[i].alg;
20883 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
20885 if (candidate != libcall && candidate
20886 && ALG_USABLE_P (candidate))
20887 max = algs->size[i].max;
20889 /* If there aren't any usable algorithms, then recursing on
20890 smaller sizes isn't going to find anything. Just return the
20891 simple byte-at-a-time copy loop. */
20892 if (!any_alg_usable_p)
20894 /* Pick something reasonable. */
20895 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20896 *dynamic_check = 128;
20897 return loop_1_byte;
20901 alg = decide_alg (count, max / 2, memset, dynamic_check);
20902 gcc_assert (*dynamic_check == -1);
20903 gcc_assert (alg != libcall);
20904 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20905 *dynamic_check = max;
20908 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
20909 #undef ALG_USABLE_P
20912 /* Decide on alignment. We know that the operand is already aligned to ALIGN
20913 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
20915 decide_alignment (int align,
20916 enum stringop_alg alg,
20919 int desired_align = 0;
20923 gcc_unreachable ();
20925 case unrolled_loop:
20926 desired_align = GET_MODE_SIZE (Pmode);
20928 case rep_prefix_8_byte:
20931 case rep_prefix_4_byte:
20932 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
20933    copying a whole cacheline at once.  */
20934 if (TARGET_PENTIUMPRO)
20939 case rep_prefix_1_byte:
20940 /* PentiumPro has special logic triggering for 8-byte aligned blocks,
20941    copying a whole cacheline at once.  */
20942 if (TARGET_PENTIUMPRO)
20956 if (desired_align < align)
20957 desired_align = align;
20958 if (expected_size != -1 && expected_size < 4)
20959 desired_align = align;
20960 return desired_align;
20963 /* Return the smallest power of 2 greater than VAL. */
20965 smallest_pow2_greater_than (int val)
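{
  /* A minimal sketch of this helper: keep doubling RET until it exceeds
     VAL, so e.g. val = 5 yields 8 and val = 8 yields 16.  */
  int ret = 1;
  while (ret <= val)
    ret <<= 1;
  return ret;
}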
20973 /* Expand string move (memcpy) operation. Use i386 string operations
20974 when profitable. expand_setmem contains similar code. The code
20975 depends upon architecture, block size and alignment, but always has
20976 the same overall structure:
20978 1) Prologue guard: Conditional that jumps up to the epilogue for small
20979    blocks that can be handled by the epilogue alone.  This is faster
20980    but also needed for correctness, since the prologue assumes the block
20981    is larger than the desired alignment.
20983    An optional dynamic check for size and a libcall for large
20984    blocks is emitted here too, with -minline-stringops-dynamically.
20986 2) Prologue: copy the first few bytes in order to get the destination
20987    aligned to DESIRED_ALIGN.  It is emitted only when ALIGN is less
20988    than DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be
20989    copied.  We emit either a jump tree for power-of-two sized
20990    blocks, or a byte loop.
20992 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
20993 with specified algorithm.
20995 4) Epilogue: code copying tail of the block that is too small to be
20996 handled by main body (or up to size guarded by prologue guard). */
20999 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
21000 rtx expected_align_exp, rtx expected_size_exp)
21006 rtx jump_around_label = NULL;
21007 HOST_WIDE_INT align = 1;
21008 unsigned HOST_WIDE_INT count = 0;
21009 HOST_WIDE_INT expected_size = -1;
21010 int size_needed = 0, epilogue_size_needed;
21011 int desired_align = 0, align_bytes = 0;
21012 enum stringop_alg alg;
21014 bool need_zero_guard = false;
21016 if (CONST_INT_P (align_exp))
21017 align = INTVAL (align_exp);
21018 /* i386 can do misaligned access at a reasonably increased cost. */
21019 if (CONST_INT_P (expected_align_exp)
21020 && INTVAL (expected_align_exp) > align)
21021 align = INTVAL (expected_align_exp);
21022 /* ALIGN is the minimum of destination and source alignment, but we care here
21023 just about destination alignment. */
21024 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
21025 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
21027 if (CONST_INT_P (count_exp))
21028 count = expected_size = INTVAL (count_exp);
21029 if (CONST_INT_P (expected_size_exp) && count == 0)
21030 expected_size = INTVAL (expected_size_exp);
21032 /* Make sure we don't need to care about overflow later on. */
21033 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
21036 /* Step 0: Decide on preferred algorithm, desired alignment and
21037 size of chunks to be copied by main loop. */
21039 alg = decide_alg (count, expected_size, false, &dynamic_check);
21040 desired_align = decide_alignment (align, alg, expected_size);
21042 if (!TARGET_ALIGN_STRINGOPS)
21043 align = desired_align;
21045 if (alg == libcall)
21047 gcc_assert (alg != no_stringop);
21049 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
21050 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
21051 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
21056 gcc_unreachable ();
21058 need_zero_guard = true;
21059 size_needed = GET_MODE_SIZE (Pmode);
21061 case unrolled_loop:
21062 need_zero_guard = true;
21063 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
21065 case rep_prefix_8_byte:
21068 case rep_prefix_4_byte:
21071 case rep_prefix_1_byte:
21075 need_zero_guard = true;
21080 epilogue_size_needed = size_needed;
21082 /* Step 1: Prologue guard. */
21084 /* Alignment code needs count to be in register. */
21085 if (CONST_INT_P (count_exp) && desired_align > align)
21087 if (INTVAL (count_exp) > desired_align
21088 && INTVAL (count_exp) > size_needed)
21091 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
21092 if (align_bytes <= 0)
21095 align_bytes = desired_align - align_bytes;
21097 if (align_bytes == 0)
21098 count_exp = force_reg (counter_mode (count_exp), count_exp);
21100 gcc_assert (desired_align >= 1 && align >= 1);
21102 /* Ensure that alignment prologue won't copy past end of block. */
21103 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
21105 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
21106 /* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
21107    Make sure it is a power of 2.  */
21108 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
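/* For example, with size_needed == 8 and desired_align - align == 7,
   epilogue_size_needed becomes smallest_pow2_greater_than (7) == 8,
   and the epilogue then handles count_exp & 7 trailing bytes.  */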
21112 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
21114 /* If main algorithm works on QImode, no epilogue is needed.
21115 For small sizes just don't align anything. */
21116 if (size_needed == 1)
21117 desired_align = align;
21124 label = gen_label_rtx ();
21125 emit_cmp_and_jump_insns (count_exp,
21126 GEN_INT (epilogue_size_needed),
21127 LTU, 0, counter_mode (count_exp), 1, label);
21128 if (expected_size == -1 || expected_size < epilogue_size_needed)
21129 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21131 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21135 /* Emit code to decide at runtime whether a library call or inline code
   should be used.  */
21137 if (dynamic_check != -1)
21139 if (CONST_INT_P (count_exp))
21141 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
21143 emit_block_move_via_libcall (dst, src, count_exp, false);
21144 count_exp = const0_rtx;
21150 rtx hot_label = gen_label_rtx ();
21151 jump_around_label = gen_label_rtx ();
21152 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
21153 LEU, 0, GET_MODE (count_exp), 1, hot_label);
21154 predict_jump (REG_BR_PROB_BASE * 90 / 100);
21155 emit_block_move_via_libcall (dst, src, count_exp, false);
21156 emit_jump (jump_around_label);
21157 emit_label (hot_label);
21161 /* Step 2: Alignment prologue. */
21163 if (desired_align > align)
21165 if (align_bytes == 0)
21167 /* Except for the first move in the epilogue, we no longer know
21168    the constant offset in aliasing info.  It doesn't seem worth
21169    the pain to maintain it for the first move, so throw away
   the info early.  */
21171 src = change_address (src, BLKmode, srcreg);
21172 dst = change_address (dst, BLKmode, destreg);
21173 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
21178 /* If we know how many bytes need to be stored before dst is
21179 sufficiently aligned, maintain aliasing info accurately. */
21180 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
21181 desired_align, align_bytes);
21182 count_exp = plus_constant (count_exp, -align_bytes);
21183 count -= align_bytes;
21185 if (need_zero_guard
21186 && (count < (unsigned HOST_WIDE_INT) size_needed
21187 || (align_bytes == 0
21188 && count < ((unsigned HOST_WIDE_INT) size_needed
21189 + desired_align - align))))
21191 /* It is possible that we copied enough so that the main loop will not
   execute.  */
21193 gcc_assert (size_needed > 1);
21194 if (label == NULL_RTX)
21195 label = gen_label_rtx ();
21196 emit_cmp_and_jump_insns (count_exp,
21197 GEN_INT (size_needed),
21198 LTU, 0, counter_mode (count_exp), 1, label);
21199 if (expected_size == -1
21200 || expected_size < (desired_align - align) / 2 + size_needed)
21201 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21203 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21206 if (label && size_needed == 1)
21208 emit_label (label);
21209 LABEL_NUSES (label) = 1;
21211 epilogue_size_needed = 1;
21213 else if (label == NULL_RTX)
21214 epilogue_size_needed = size_needed;
21216 /* Step 3: Main loop. */
21222 gcc_unreachable ();
21224 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21225 count_exp, QImode, 1, expected_size);
21228 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21229 count_exp, Pmode, 1, expected_size);
21231 case unrolled_loop:
21232 /* Unroll only by a factor of 2 in 32bit mode, since we don't have enough
21233    registers for 4 temporaries anyway.  */
21234 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
21235 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
21238 case rep_prefix_8_byte:
21239 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
21242 case rep_prefix_4_byte:
21243 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
21246 case rep_prefix_1_byte:
21247 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
21251 /* Properly adjust the offsets of the src and dest memory for aliasing.  */
21252 if (CONST_INT_P (count_exp))
21254 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
21255 (count / size_needed) * size_needed);
21256 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
21257 (count / size_needed) * size_needed);
21261 src = change_address (src, BLKmode, srcreg);
21262 dst = change_address (dst, BLKmode, destreg);
21265 /* Step 4: Epilogue to copy the remaining bytes. */
21269 /* When the main loop is done, COUNT_EXP might hold the original count,
21270    while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
21271    The epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
21272    bytes.  Compensate if needed.  */
21274 if (size_needed < epilogue_size_needed)
21277 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
21278 GEN_INT (size_needed - 1), count_exp, 1,
21280 if (tmp != count_exp)
21281 emit_move_insn (count_exp, tmp);
21283 emit_label (label);
21284 LABEL_NUSES (label) = 1;
21287 if (count_exp != const0_rtx && epilogue_size_needed > 1)
21288 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
21289 epilogue_size_needed);
21290 if (jump_around_label)
21291 emit_label (jump_around_label);
21295 /* Helper function for memset.  For the QImode value 0xXY produce
21296    0xXYXYXYXY of the width specified by MODE.  This is essentially
21297    a multiplication by 0x01010101, but we can do slightly better than
21298    synth_mult by unwinding the sequence by hand on CPUs with
   slow multiply.  */
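/* Concretely, the shift/or unwinding below turns the zero-extended
   byte 0x000000AB into

     v |= v << 8;    ->  0x0000ABAB
     v |= v << 16;   ->  0xABABABAB   (plus v |= v << 32 for DImode)

   which equals 0xAB * 0x01010101 without using a multiply.  */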
21301 promote_duplicated_reg (enum machine_mode mode, rtx val)
21303 enum machine_mode valmode = GET_MODE (val);
21305 int nops = mode == DImode ? 3 : 2;
21307 gcc_assert (mode == SImode || mode == DImode);
21308 if (val == const0_rtx)
21309 return copy_to_mode_reg (mode, const0_rtx);
21310 if (CONST_INT_P (val))
21312 HOST_WIDE_INT v = INTVAL (val) & 255;
21316 if (mode == DImode)
21317 v |= (v << 16) << 16;
21318 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
21321 if (valmode == VOIDmode)
21323 if (valmode != QImode)
21324 val = gen_lowpart (QImode, val);
21325 if (mode == QImode)
21327 if (!TARGET_PARTIAL_REG_STALL)
21329 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
21330 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
21331 <= (ix86_cost->shift_const + ix86_cost->add) * nops
21332 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
21334 rtx reg = convert_modes (mode, QImode, val, true);
21335 tmp = promote_duplicated_reg (mode, const1_rtx);
21336 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
21341 rtx reg = convert_modes (mode, QImode, val, true);
21343 if (!TARGET_PARTIAL_REG_STALL)
21344 if (mode == SImode)
21345 emit_insn (gen_movsi_insv_1 (reg, reg));
21347 emit_insn (gen_movdi_insv_1 (reg, reg));
21350 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
21351 NULL, 1, OPTAB_DIRECT);
21353 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
21355 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
21356 NULL, 1, OPTAB_DIRECT);
21357 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
21358 if (mode == SImode)
21360 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
21361 NULL, 1, OPTAB_DIRECT);
21362 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
21367 /* Duplicate the value VAL via promote_duplicated_reg into the maximal
21368    size needed by the main loop copying SIZE_NEEDED chunks and by the
21369    prologue raising the alignment from ALIGN to DESIRED_ALIGN.  */
21371 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
21376 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
21377 promoted_val = promote_duplicated_reg (DImode, val);
21378 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
21379 promoted_val = promote_duplicated_reg (SImode, val);
21380 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
21381 promoted_val = promote_duplicated_reg (HImode, val);
21383 promoted_val = val;
21385 return promoted_val;
21388 /* Expand a string set operation (memset/bzero).  Use i386 string
21389    operations when profitable.  See the expand_movmem comment for an
21390    explanation of the individual steps performed.  */
21392 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
21393 rtx expected_align_exp, rtx expected_size_exp)
21398 rtx jump_around_label = NULL;
21399 HOST_WIDE_INT align = 1;
21400 unsigned HOST_WIDE_INT count = 0;
21401 HOST_WIDE_INT expected_size = -1;
21402 int size_needed = 0, epilogue_size_needed;
21403 int desired_align = 0, align_bytes = 0;
21404 enum stringop_alg alg;
21405 rtx promoted_val = NULL;
21406 bool force_loopy_epilogue = false;
21408 bool need_zero_guard = false;
21410 if (CONST_INT_P (align_exp))
21411 align = INTVAL (align_exp);
21412 /* i386 can do misaligned access at a reasonably increased cost. */
21413 if (CONST_INT_P (expected_align_exp)
21414 && INTVAL (expected_align_exp) > align)
21415 align = INTVAL (expected_align_exp);
21416 if (CONST_INT_P (count_exp))
21417 count = expected_size = INTVAL (count_exp);
21418 if (CONST_INT_P (expected_size_exp) && count == 0)
21419 expected_size = INTVAL (expected_size_exp);
21421 /* Make sure we don't need to care about overflow later on. */
21422 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
21425 /* Step 0: Decide on preferred algorithm, desired alignment and
21426 size of chunks to be copied by main loop. */
21428 alg = decide_alg (count, expected_size, true, &dynamic_check);
21429 desired_align = decide_alignment (align, alg, expected_size);
21431 if (!TARGET_ALIGN_STRINGOPS)
21432 align = desired_align;
21434 if (alg == libcall)
21436 gcc_assert (alg != no_stringop);
21438 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
21439 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
21444 gcc_unreachable ();
21446 need_zero_guard = true;
21447 size_needed = GET_MODE_SIZE (Pmode);
21449 case unrolled_loop:
21450 need_zero_guard = true;
21451 size_needed = GET_MODE_SIZE (Pmode) * 4;
21453 case rep_prefix_8_byte:
21456 case rep_prefix_4_byte:
21459 case rep_prefix_1_byte:
21463 need_zero_guard = true;
21467 epilogue_size_needed = size_needed;
21469 /* Step 1: Prologue guard. */
21471 /* Alignment code needs count to be in register. */
21472 if (CONST_INT_P (count_exp) && desired_align > align)
21474 if (INTVAL (count_exp) > desired_align
21475 && INTVAL (count_exp) > size_needed)
21478 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
21479 if (align_bytes <= 0)
21482 align_bytes = desired_align - align_bytes;
21484 if (align_bytes == 0)
21486 enum machine_mode mode = SImode;
21487 if (TARGET_64BIT && (count & ~0xffffffff))
21489 count_exp = force_reg (mode, count_exp);
21492 /* Do the cheap promotion to allow better CSE across the
21493    main loop and epilogue (i.e. one load of the big constant in
21494    front of all the code).  */
21495 if (CONST_INT_P (val_exp))
21496 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21497 desired_align, align);
21498 /* Ensure that alignment prologue won't copy past end of block. */
21499 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
21501 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
21502 /* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
21503    Make sure it is a power of 2.  */
21504 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
21506 /* To improve performance of small blocks, we jump around the VAL
21507    promoting code.  This means that if the promoted VAL is not constant,
21508    we might not use it in the epilogue and have to use a byte
   loop instead.  */
21510 if (epilogue_size_needed > 2 && !promoted_val)
21511 force_loopy_epilogue = true;
21514 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
21516 /* If main algorithm works on QImode, no epilogue is needed.
21517 For small sizes just don't align anything. */
21518 if (size_needed == 1)
21519 desired_align = align;
21526 label = gen_label_rtx ();
21527 emit_cmp_and_jump_insns (count_exp,
21528 GEN_INT (epilogue_size_needed),
21529 LTU, 0, counter_mode (count_exp), 1, label);
21530 if (expected_size == -1 || expected_size <= epilogue_size_needed)
21531 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21533 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21536 if (dynamic_check != -1)
21538 rtx hot_label = gen_label_rtx ();
21539 jump_around_label = gen_label_rtx ();
21540 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
21541 LEU, 0, counter_mode (count_exp), 1, hot_label);
21542 predict_jump (REG_BR_PROB_BASE * 90 / 100);
21543 set_storage_via_libcall (dst, count_exp, val_exp, false);
21544 emit_jump (jump_around_label);
21545 emit_label (hot_label);
21548 /* Step 2: Alignment prologue. */
21550 /* Do the expensive promotion once we branched off the small blocks. */
21552 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21553 desired_align, align);
21554 gcc_assert (desired_align >= 1 && align >= 1);
21556 if (desired_align > align)
21558 if (align_bytes == 0)
21560 /* Except for the first move in the epilogue, we no longer know
21561    the constant offset in aliasing info.  It doesn't seem worth
21562    the pain to maintain it for the first move, so throw away
   the info early.  */
21564 dst = change_address (dst, BLKmode, destreg);
21565 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
21570 /* If we know how many bytes need to be stored before dst is
21571 sufficiently aligned, maintain aliasing info accurately. */
21572 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
21573 desired_align, align_bytes);
21574 count_exp = plus_constant (count_exp, -align_bytes);
21575 count -= align_bytes;
21577 if (need_zero_guard
21578 && (count < (unsigned HOST_WIDE_INT) size_needed
21579 || (align_bytes == 0
21580 && count < ((unsigned HOST_WIDE_INT) size_needed
21581 + desired_align - align))))
21583 /* It is possible that we copied enough so that the main loop will not
   execute.  */
21585 gcc_assert (size_needed > 1);
21586 if (label == NULL_RTX)
21587 label = gen_label_rtx ();
21588 emit_cmp_and_jump_insns (count_exp,
21589 GEN_INT (size_needed),
21590 LTU, 0, counter_mode (count_exp), 1, label);
21591 if (expected_size == -1
21592 || expected_size < (desired_align - align) / 2 + size_needed)
21593 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21595 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21598 if (label && size_needed == 1)
21600 emit_label (label);
21601 LABEL_NUSES (label) = 1;
21603 promoted_val = val_exp;
21604 epilogue_size_needed = 1;
21606 else if (label == NULL_RTX)
21607 epilogue_size_needed = size_needed;
21609 /* Step 3: Main loop. */
21615 gcc_unreachable ();
21617 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21618 count_exp, QImode, 1, expected_size);
21621 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21622 count_exp, Pmode, 1, expected_size);
21624 case unrolled_loop:
21625 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21626 count_exp, Pmode, 4, expected_size);
21628 case rep_prefix_8_byte:
21629 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21632 case rep_prefix_4_byte:
21633 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21636 case rep_prefix_1_byte:
21637 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21641 /* Properly adjust the offset of the dest memory for aliasing.  */
21642 if (CONST_INT_P (count_exp))
21643 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
21644 (count / size_needed) * size_needed);
21646 dst = change_address (dst, BLKmode, destreg);
21648 /* Step 4: Epilogue to copy the remaining bytes. */
21652 /* When the main loop is done, COUNT_EXP might hold the original count,
21653    while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
21654    The epilogue code will actually set COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
21655    bytes.  Compensate if needed.  */
21657 if (size_needed < epilogue_size_needed)
21660 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
21661 GEN_INT (size_needed - 1), count_exp, 1,
21663 if (tmp != count_exp)
21664 emit_move_insn (count_exp, tmp);
21666 emit_label (label);
21667 LABEL_NUSES (label) = 1;
21670 if (count_exp != const0_rtx && epilogue_size_needed > 1)
21672 if (force_loopy_epilogue)
21673 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
21674 epilogue_size_needed);
21676 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
21677 epilogue_size_needed);
21679 if (jump_around_label)
21680 emit_label (jump_around_label);
21684 /* Expand the appropriate insns for doing strlen if not just doing
   repnz; scasb

21687    out = result, initialized with the start address
21688    align_rtx = alignment of the address.
21689    scratch = scratch register, initialized with the start address when
21690    not aligned, otherwise undefined
21692 This is just the body. It needs the initializations mentioned above and
21693 some address computing at the end. These things are done in i386.md. */
21696 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
21700 rtx align_2_label = NULL_RTX;
21701 rtx align_3_label = NULL_RTX;
21702 rtx align_4_label = gen_label_rtx ();
21703 rtx end_0_label = gen_label_rtx ();
21705 rtx tmpreg = gen_reg_rtx (SImode);
21706 rtx scratch = gen_reg_rtx (SImode);
21710 if (CONST_INT_P (align_rtx))
21711 align = INTVAL (align_rtx);
21713 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
21715 /* Is there a known alignment and is it less than 4? */
21718 rtx scratch1 = gen_reg_rtx (Pmode);
21719 emit_move_insn (scratch1, out);
21720 /* Is there a known alignment and is it not 2? */
21723 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
21724 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
21726 /* Leave just the 3 lower bits. */
21727 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
21728 NULL_RTX, 0, OPTAB_WIDEN);
21730 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21731 Pmode, 1, align_4_label);
21732 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
21733 Pmode, 1, align_2_label);
21734 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
21735 Pmode, 1, align_3_label);
21739 /* Since the alignment is 2, we have to check 2 or 0 bytes;
21740    check whether the pointer is aligned to a 4-byte boundary.  */
21742 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
21743 NULL_RTX, 0, OPTAB_WIDEN);
21745 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21746 Pmode, 1, align_4_label);
21749 mem = change_address (src, QImode, out);
21751 /* Now compare the bytes.  */
21753 /* Compare the first n unaligned bytes on a byte-by-byte basis.  */
21754 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
21755 QImode, 1, end_0_label);
21757 /* Increment the address. */
21758 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21760 /* Not needed with an alignment of 2 */
21763 emit_label (align_2_label);
21765 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21768 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21770 emit_label (align_3_label);
21773 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21776 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21779 /* Generate a loop to check 4 bytes at a time.  It is not a good idea to
21780    align this loop; it only makes the program larger and does not help
   performance.  */
21782 emit_label (align_4_label);
21784 mem = change_address (src, SImode, out);
21785 emit_move_insn (scratch, mem);
21786 emit_insn (ix86_gen_add3 (out, out, GEN_INT (4)));
21788 /* This formula yields a nonzero result iff one of the bytes is zero.
21789    This saves three branches inside the loop and many cycles.  */
21791 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
21792 emit_insn (gen_one_cmplsi2 (scratch, scratch));
21793 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
21794 emit_insn (gen_andsi3 (tmpreg, tmpreg,
21795 gen_int_mode (0x80808080, SImode)));
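/* Worked example (an illustration only): for the 4 bytes "a\0bc"
   loaded little-endian, scratch = 0x63620061.

     scratch - 0x01010101          = 0x6260FF60
     ... & ~scratch                = 0x0000FF00
     ... & 0x80808080              = 0x00008000   (nonzero: zero byte found)

   For a word containing no zero byte the final result is 0, so a single
   test replaces four per-byte comparisons.  */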
21796 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
21801 rtx reg = gen_reg_rtx (SImode);
21802 rtx reg2 = gen_reg_rtx (Pmode);
21803 emit_move_insn (reg, tmpreg);
21804 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
21806 /* If zero is not in the first two bytes, move two bytes forward. */
21807 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21808 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21809 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21810 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
21811 gen_rtx_IF_THEN_ELSE (SImode, tmp,
21814 /* Emit lea manually to avoid clobbering of flags. */
21815 emit_insn (gen_rtx_SET (SImode, reg2,
21816 gen_rtx_PLUS (Pmode, out, const2_rtx)));
21818 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21819 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21820 emit_insn (gen_rtx_SET (VOIDmode, out,
21821 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
21827 rtx end_2_label = gen_label_rtx ();
21828 /* Is zero in the first two bytes? */
21830 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21831 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21832 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
21833 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
21834 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
21836 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
21837 JUMP_LABEL (tmp) = end_2_label;
21839 /* Not in the first two. Move two bytes forward. */
21840 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
21841 emit_insn (ix86_gen_add3 (out, out, const2_rtx));
21843 emit_label (end_2_label);
21847 /* Avoid a branch when fixing up the final byte offset.  */
21848 tmpreg = gen_lowpart (QImode, tmpreg);
21849 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
21850 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
21851 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
21852 emit_insn (ix86_gen_sub3_carry (out, out, GEN_INT (3), tmp, cmp));
21854 emit_label (end_0_label);
21857 /* Expand strlen. */
21860 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
21862 rtx addr, scratch1, scratch2, scratch3, scratch4;
21864 /* The generic case of the strlen expander is long.  Avoid expanding it
21865    unless TARGET_INLINE_ALL_STRINGOPS.  */
21867 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21868 && !TARGET_INLINE_ALL_STRINGOPS
21869 && !optimize_insn_for_size_p ()
21870 && (!CONST_INT_P (align) || INTVAL (align) < 4))
21873 addr = force_reg (Pmode, XEXP (src, 0));
21874 scratch1 = gen_reg_rtx (Pmode);
21876 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21877 && !optimize_insn_for_size_p ())
21879 /* Well, it seems that some optimizers do not combine a call like
21880    foo (strlen (bar), strlen (bar));
21881    when the move and the subtraction are done here.  They do calculate
21882    the length just once when these instructions are done inside
21883    output_strlen_unroll().  But I think that since &bar[strlen (bar)] is
21884    often used and I use one fewer register for the lifetime of
21885    output_strlen_unroll() this is better.  */
21887 emit_move_insn (out, addr);
21889 ix86_expand_strlensi_unroll_1 (out, src, align);
21891 /* strlensi_unroll_1 returns the address of the zero at the end of
21892 the string, like memchr(), so compute the length by subtracting
21893 the start address. */
21894 emit_insn (ix86_gen_sub3 (out, out, addr));
21900 /* Can't use this if the user has appropriated eax, ecx, or edi. */
21901 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
21904 scratch2 = gen_reg_rtx (Pmode);
21905 scratch3 = gen_reg_rtx (Pmode);
21906 scratch4 = force_reg (Pmode, constm1_rtx);
21908 emit_move_insn (scratch3, addr);
21909 eoschar = force_reg (QImode, eoschar);
21911 src = replace_equiv_address_nv (src, scratch3);
21913 /* If .md starts supporting :P, this can be done in .md. */
21914 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
21915 scratch4), UNSPEC_SCAS);
21916 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
21917 emit_insn (ix86_gen_one_cmpl2 (scratch2, scratch1));
21918 emit_insn (ix86_gen_add3 (out, scratch2, constm1_rtx));
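/* Recovering the length from the scas count (an illustration assuming
   the usual repnz; scasb semantics): the count register starts at -1 and
   is decremented once per byte scanned, including the terminator, so
   after the scan it holds -(len + 2).  Then ~(-(len + 2)) = len + 1, and
   adding -1 gives len.  E.g. for "abc", len 3: count = -5, ~(-5) = 4,
   4 - 1 = 3.  */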
21923 /* For a given symbol (function) construct code to compute the address of
21924    its PLT entry in the large x86-64 PIC model.  */
21926 construct_plt_address (rtx symbol)
21928 rtx tmp = gen_reg_rtx (Pmode);
21929 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
21931 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
21932 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
21934 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
21935 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
21940 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
21942 rtx pop, int sibcall)
21944 rtx use = NULL, call;
21946 if (pop == const0_rtx)
21948 gcc_assert (!TARGET_64BIT || !pop);
21950 if (TARGET_MACHO && !TARGET_64BIT)
21953 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
21954 fnaddr = machopic_indirect_call_target (fnaddr);
21959 /* Static functions and indirect calls don't need the pic register. */
21960 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
21961 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
21962 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
21963 use_reg (&use, pic_offset_table_rtx);
21966 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
21968 rtx al = gen_rtx_REG (QImode, AX_REG);
21969 emit_move_insn (al, callarg2);
21970 use_reg (&use, al);
21973 if (ix86_cmodel == CM_LARGE_PIC
21975 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
21976 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
21977 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
21979 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
21980 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
21982 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
21983 fnaddr = gen_rtx_MEM (QImode, fnaddr);
21986 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
21988 call = gen_rtx_SET (VOIDmode, retval, call);
21991 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
21992 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
21993 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
21995 if (TARGET_64BIT_MS_ABI
21996 && (!callarg2 || INTVAL (callarg2) != -2))
21998       /* We need to represent that SI and DI registers are clobbered
21999          by the function.  */
22000 static int clobbered_registers[] = {
22001 XMM6_REG, XMM7_REG, XMM8_REG,
22002 XMM9_REG, XMM10_REG, XMM11_REG,
22003 XMM12_REG, XMM13_REG, XMM14_REG,
22004 XMM15_REG, SI_REG, DI_REG
22007 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
22008 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
22009 UNSPEC_MS_TO_SYSV_CALL);
22013 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
22014 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
22017 (SSE_REGNO_P (clobbered_registers[i])
22019 clobbered_registers[i]));
22021 call = gen_rtx_PARALLEL (VOIDmode,
22022 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
22026 /* Add UNSPEC_CALL_NEEDS_VZEROUPPER decoration. */
22027 if (TARGET_VZEROUPPER)
22032 if (cfun->machine->callee_pass_avx256_p)
22034 if (cfun->machine->callee_return_avx256_p)
22035 avx256 = callee_return_pass_avx256;
22037 avx256 = callee_pass_avx256;
22039 else if (cfun->machine->callee_return_avx256_p)
22040 avx256 = callee_return_avx256;
22042 avx256 = call_no_avx256;
22044 if (reload_completed)
22045 emit_insn (gen_avx_vzeroupper (GEN_INT (avx256)));
22048 unspec = gen_rtx_UNSPEC (VOIDmode,
22049 gen_rtvec (1, GEN_INT (avx256)),
22050 UNSPEC_CALL_NEEDS_VZEROUPPER);
22051 call = gen_rtx_PARALLEL (VOIDmode,
22052 gen_rtvec (2, call, unspec));
22056 call = emit_call_insn (call);
22058 CALL_INSN_FUNCTION_USAGE (call) = use;
22064 ix86_split_call_vzeroupper (rtx insn, rtx vzeroupper)
22066 rtx call = XVECEXP (PATTERN (insn), 0, 0);
22067 emit_insn (gen_avx_vzeroupper (vzeroupper));
22068 emit_call_insn (call);
22071 /* Output the assembly for a call instruction. */
22074 ix86_output_call_insn (rtx insn, rtx call_op, int addr_op)
22076 bool direct_p = constant_call_address_operand (call_op, Pmode);
22077 bool seh_nop_p = false;
22079 gcc_assert (addr_op == 0 || addr_op == 1);
22081 if (SIBLING_CALL_P (insn))
22084 return addr_op ? "jmp\t%P1" : "jmp\t%P0";
22085 /* SEH epilogue detection requires the indirect branch case
22086 to include REX.W. */
22087 else if (TARGET_SEH)
22088 return addr_op ? "rex.W jmp %A1" : "rex.W jmp %A0";
22090 return addr_op ? "jmp\t%A1" : "jmp\t%A0";
22093 /* SEH unwinding can require an extra nop to be emitted in several
22094 circumstances. Determine if we have one of those. */
22099 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
22101 /* If we get to another real insn, we don't need the nop. */
22105       /* If we get to the epilogue note, prevent a catch region from
22106          being adjacent to the standard epilogue sequence.  If non-call
22107          exceptions are in use, we'll have done this during epilogue emission.  */
22108 if (NOTE_P (i) && NOTE_KIND (i) == NOTE_INSN_EPILOGUE_BEG
22109 && !flag_non_call_exceptions
22110 && !can_throw_internal (insn))
22117 /* If we didn't find a real insn following the call, prevent the
22118 unwinder from looking into the next function. */
22126 return addr_op ? "call\t%P1\n\tnop" : "call\t%P0\n\tnop";
22128 return addr_op ? "call\t%P1" : "call\t%P0";
22133 return addr_op ? "call\t%A1\n\tnop" : "call\t%A0\n\tnop";
22135 return addr_op ? "call\t%A1" : "call\t%A0";
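/* For example (illustrative): a direct sibling call becomes "jmp\tfoo" and a
   direct call "call\tfoo"; under TARGET_SEH, a call that turns out to be the
   last real insn before the epilogue gets a trailing nop so the unwinder does
   not attribute the return address to the next function.  */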
22139 /* Clear stack slot assignments remembered from previous functions.
22140    This is called from INIT_EXPANDERS once before RTL is emitted for each
22141    function.  */
22143 static struct machine_function *
22144 ix86_init_machine_status (void)
22146 struct machine_function *f;
22148 f = ggc_alloc_cleared_machine_function ();
22149 f->use_fast_prologue_epilogue_nregs = -1;
22150 f->tls_descriptor_call_expanded_p = 0;
22151 f->call_abi = ix86_abi;
22156 /* Return a MEM corresponding to a stack slot with mode MODE.
22157 Allocate a new slot if necessary.
22159 The RTL for a function can have several slots available: N is
22160 which slot to use. */
22163 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
22165 struct stack_local_entry *s;
22167 gcc_assert (n < MAX_386_STACK_LOCALS);
22169 /* Virtual slot is valid only before vregs are instantiated. */
22170 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
22172 for (s = ix86_stack_locals; s; s = s->next)
22173 if (s->mode == mode && s->n == n)
22174 return copy_rtx (s->rtl);
22176 s = ggc_alloc_stack_local_entry ();
22179 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
22181 s->next = ix86_stack_locals;
22182 ix86_stack_locals = s;
22186 /* Construct the SYMBOL_REF for the tls_get_addr function. */
22188 static GTY(()) rtx ix86_tls_symbol;
22190 ix86_tls_get_addr (void)
22193 if (!ix86_tls_symbol)
22195 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
22196 (TARGET_ANY_GNU_TLS
22198 ? "___tls_get_addr"
22199 : "__tls_get_addr");
22202 return ix86_tls_symbol;
22205 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
22207 static GTY(()) rtx ix86_tls_module_base_symbol;
22209 ix86_tls_module_base (void)
22212 if (!ix86_tls_module_base_symbol)
22214 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
22215 "_TLS_MODULE_BASE_");
22216 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
22217 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
22220 return ix86_tls_module_base_symbol;
22223 /* Calculate the length of the memory address in the instruction
22224 encoding. Does not include the one-byte modrm, opcode, or prefix. */
22227 memory_address_length (rtx addr)
22229 struct ix86_address parts;
22230 rtx base, index, disp;
22234 if (GET_CODE (addr) == PRE_DEC
22235 || GET_CODE (addr) == POST_INC
22236 || GET_CODE (addr) == PRE_MODIFY
22237 || GET_CODE (addr) == POST_MODIFY)
22240 ok = ix86_decompose_address (addr, &parts);
22243 if (parts.base && GET_CODE (parts.base) == SUBREG)
22244 parts.base = SUBREG_REG (parts.base);
22245 if (parts.index && GET_CODE (parts.index) == SUBREG)
22246 parts.index = SUBREG_REG (parts.index);
22249 index = parts.index;
22253   /* Rule of thumb:
22254        - esp as the base always wants an index,
22255        - ebp as the base always wants a displacement,
22256        - r12 as the base always wants an index,
22257        - r13 as the base always wants a displacement.  */
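/* Worked examples (illustrative, assuming the usual modrm/SIB encodings;
   the one-byte modrm itself is not counted, per the comment above):
     (%eax)          -> no SIB, no disp:     length 0
     (%esp)          -> needs a SIB byte:    length 1
     (%ebp)          -> needs a zero disp8:  length 1
     16(%eax,%ebx,4) -> SIB byte plus disp8: length 2
     foo(,%eax,4)    -> SIB byte plus disp32: length 5  */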
22259 /* Register Indirect. */
22260 if (base && !index && !disp)
22262       /* esp (for its index) and ebp (for its displacement) need
22263          the two-byte modrm form.  Similarly for r12 and r13 in 64-bit
22264          mode.  */
22266 && (addr == arg_pointer_rtx
22267 || addr == frame_pointer_rtx
22268 || REGNO (addr) == SP_REG
22269 || REGNO (addr) == BP_REG
22270 || REGNO (addr) == R12_REG
22271 || REGNO (addr) == R13_REG))
22275   /* Direct Addressing.  In 64-bit mode mod 00 r/m 5
22276      is not disp32, but disp32(%rip), so for disp32
22277      a SIB byte is needed, unless print_operand_address
22278      optimizes it into disp32(%rip) or (%rip) is implied
22279      by UNSPEC.  */
22280 else if (disp && !base && !index)
22287 if (GET_CODE (disp) == CONST)
22288 symbol = XEXP (disp, 0);
22289 if (GET_CODE (symbol) == PLUS
22290 && CONST_INT_P (XEXP (symbol, 1)))
22291 symbol = XEXP (symbol, 0);
22293 if (GET_CODE (symbol) != LABEL_REF
22294 && (GET_CODE (symbol) != SYMBOL_REF
22295 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
22296 && (GET_CODE (symbol) != UNSPEC
22297 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
22298 && XINT (symbol, 1) != UNSPEC_PCREL
22299 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
22306 /* Find the length of the displacement constant. */
22309 if (base && satisfies_constraint_K (disp))
22314 /* ebp always wants a displacement. Similarly r13. */
22315 else if (base && REG_P (base)
22316 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
22319 /* An index requires the two-byte modrm form.... */
22321 /* ...like esp (or r12), which always wants an index. */
22322 || base == arg_pointer_rtx
22323 || base == frame_pointer_rtx
22324 || (base && REG_P (base)
22325 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
22342 /* Compute the default value for the "length_immediate" attribute.  When
22343    SHORTFORM is set, expect that the insn has an 8-bit immediate alternative.  */
22345 ix86_attr_length_immediate_default (rtx insn, int shortform)
22349 extract_insn_cached (insn);
22350 for (i = recog_data.n_operands - 1; i >= 0; --i)
22351 if (CONSTANT_P (recog_data.operand[i]))
22353 enum attr_mode mode = get_attr_mode (insn);
22356 if (shortform && CONST_INT_P (recog_data.operand[i]))
22358 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
22365 ival = trunc_int_for_mode (ival, HImode);
22368 ival = trunc_int_for_mode (ival, SImode);
22373 if (IN_RANGE (ival, -128, 127))
22390       /* Immediates for DImode instructions are encoded as 32-bit sign-extended values.  */
22395 fatal_insn ("unknown insn mode", insn);
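/* As an illustrative example of the logic above (not from the original
   sources): "addl $100, %eax" can use the imm8 alternative, since 100 is in
   [-128, 127], so its immediate contributes 1 byte; "addl $1000, %eax" needs
   a full imm32 and contributes 4 bytes.  DImode immediates also contribute
   4 bytes, encoded as 32-bit sign-extended values.  */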
22400 /* Compute the default value for the "length_address" attribute.  */
22402 ix86_attr_length_address_default (rtx insn)
22406 if (get_attr_type (insn) == TYPE_LEA)
22408 rtx set = PATTERN (insn), addr;
22410 if (GET_CODE (set) == PARALLEL)
22411 set = XVECEXP (set, 0, 0);
22413 gcc_assert (GET_CODE (set) == SET);
22415 addr = SET_SRC (set);
22416 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
22418 if (GET_CODE (addr) == ZERO_EXTEND)
22419 addr = XEXP (addr, 0);
22420 if (GET_CODE (addr) == SUBREG)
22421 addr = SUBREG_REG (addr);
22424 return memory_address_length (addr);
22427 extract_insn_cached (insn);
22428 for (i = recog_data.n_operands - 1; i >= 0; --i)
22429 if (MEM_P (recog_data.operand[i]))
22431 constrain_operands_cached (reload_completed);
22432 if (which_alternative != -1)
22434 const char *constraints = recog_data.constraints[i];
22435 int alt = which_alternative;
22437 while (*constraints == '=' || *constraints == '+')
22440 while (*constraints++ != ',')
22442 /* Skip ignored operands. */
22443 if (*constraints == 'X')
22446 return memory_address_length (XEXP (recog_data.operand[i], 0));
22451 /* Compute the default value for the "length_vex" attribute.  It includes
22452    the 2- or 3-byte VEX prefix and 1 opcode byte.  */
22455 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
22460   /* Only the 0f opcode can use the 2-byte VEX prefix; the VEX W bit
22461      requires the 3-byte VEX prefix.  */
22462 if (!has_0f_opcode || has_vex_w)
22465   /* We can always use the 2-byte VEX prefix in 32-bit mode.  */
22469 extract_insn_cached (insn);
22471 for (i = recog_data.n_operands - 1; i >= 0; --i)
22472 if (REG_P (recog_data.operand[i]))
22474       /* The REX.W bit requires the 3-byte VEX prefix.  */
22475 if (GET_MODE (recog_data.operand[i]) == DImode
22476 && GENERAL_REG_P (recog_data.operand[i]))
22481       /* The REX.X or REX.B bits require the 3-byte VEX prefix.  */
22482 if (MEM_P (recog_data.operand[i])
22483 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
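/* Illustrative examples (assuming the usual VEX encodings): "vaddps %xmm1,
   %xmm2, %xmm0" can use the 2-byte (C5) VEX prefix, giving a length of 3
   with the opcode byte; using %xmm8-%xmm15 as a base or index register sets
   REX.X/REX.B and forces the 3-byte (C4) form, giving a length of 4.  */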
22490 /* Return the maximum number of instructions a cpu can issue. */
22493 ix86_issue_rate (void)
22497 case PROCESSOR_PENTIUM:
22498 case PROCESSOR_ATOM:
22502 case PROCESSOR_PENTIUMPRO:
22503 case PROCESSOR_PENTIUM4:
22504 case PROCESSOR_CORE2_32:
22505 case PROCESSOR_CORE2_64:
22506 case PROCESSOR_COREI7_32:
22507 case PROCESSOR_COREI7_64:
22508 case PROCESSOR_ATHLON:
22510 case PROCESSOR_AMDFAM10:
22511 case PROCESSOR_NOCONA:
22512 case PROCESSOR_GENERIC32:
22513 case PROCESSOR_GENERIC64:
22514 case PROCESSOR_BDVER1:
22515 case PROCESSOR_BTVER1:
22523 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
22524    by DEP_INSN and nothing else set by DEP_INSN.  */
22527 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
22531 /* Simplify the test for uninteresting insns. */
22532 if (insn_type != TYPE_SETCC
22533 && insn_type != TYPE_ICMOV
22534 && insn_type != TYPE_FCMOV
22535 && insn_type != TYPE_IBR)
22538 if ((set = single_set (dep_insn)) != 0)
22540 set = SET_DEST (set);
22543 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
22544 && XVECLEN (PATTERN (dep_insn), 0) == 2
22545 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
22546 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
22548 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
22549       set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
22554 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
22557 /* This test is true if the dependent insn reads the flags but
22558 not any other potentially set register. */
22559 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
22562 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
22568 /* Return true iff USE_INSN has a memory address with operands set by
22569    SET_INSN.  */
22572 ix86_agi_dependent (rtx set_insn, rtx use_insn)
22575 extract_insn_cached (use_insn);
22576 for (i = recog_data.n_operands - 1; i >= 0; --i)
22577 if (MEM_P (recog_data.operand[i]))
22579 rtx addr = XEXP (recog_data.operand[i], 0);
22580 return modified_in_p (addr, set_insn) != 0;
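/* For example (illustrative): on Pentium, in the pair
     addl $4, %ebx
     movl (%ebx), %eax
   the load's address depends on the add immediately before it, so
   ix86_agi_dependent returns true and ix86_adjust_cost below charges
   the extra Address Generation Interlock latency.  */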
22586 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22588 enum attr_type insn_type, dep_insn_type;
22589 enum attr_memory memory;
22591 int dep_insn_code_number;
22593 /* Anti and output dependencies have zero cost on all CPUs. */
22594 if (REG_NOTE_KIND (link) != 0)
22597 dep_insn_code_number = recog_memoized (dep_insn);
22599 /* If we can't recognize the insns, we can't really do anything. */
22600 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
22603 insn_type = get_attr_type (insn);
22604 dep_insn_type = get_attr_type (dep_insn);
22608 case PROCESSOR_PENTIUM:
22609 /* Address Generation Interlock adds a cycle of latency. */
22610 if (insn_type == TYPE_LEA)
22612 rtx addr = PATTERN (insn);
22614 if (GET_CODE (addr) == PARALLEL)
22615 addr = XVECEXP (addr, 0, 0);
22617 gcc_assert (GET_CODE (addr) == SET);
22619 addr = SET_SRC (addr);
22620 if (modified_in_p (addr, dep_insn))
22623 else if (ix86_agi_dependent (dep_insn, insn))
22626 /* ??? Compares pair with jump/setcc. */
22627 if (ix86_flags_dependent (insn, dep_insn, insn_type))
22630       /* Floating point stores require the value to be ready one cycle earlier.  */
22631 if (insn_type == TYPE_FMOV
22632 && get_attr_memory (insn) == MEMORY_STORE
22633 && !ix86_agi_dependent (dep_insn, insn))
22637 case PROCESSOR_PENTIUMPRO:
22638 memory = get_attr_memory (insn);
22640 /* INT->FP conversion is expensive. */
22641 if (get_attr_fp_int_src (dep_insn))
22644 /* There is one cycle extra latency between an FP op and a store. */
22645 if (insn_type == TYPE_FMOV
22646 && (set = single_set (dep_insn)) != NULL_RTX
22647 && (set2 = single_set (insn)) != NULL_RTX
22648 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
22649 && MEM_P (SET_DEST (set2)))
22652       /* Show the ability of the reorder buffer to hide the latency of a
22653          load by executing it in parallel with the previous instruction,
22654          when the previous instruction is not needed to compute the address.  */
22655 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22656 && !ix86_agi_dependent (dep_insn, insn))
22658           /* Claim moves to take one cycle, as the core can issue one load
22659              at a time and the next load can start a cycle later.  */
22660 if (dep_insn_type == TYPE_IMOV
22661 || dep_insn_type == TYPE_FMOV)
22669 memory = get_attr_memory (insn);
22671       /* The esp dependency is resolved before the instruction is really
22672          finished.  */
22673 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
22674 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
22677 /* INT->FP conversion is expensive. */
22678 if (get_attr_fp_int_src (dep_insn))
22681       /* Show the ability of the reorder buffer to hide the latency of a
22682          load by executing it in parallel with the previous instruction,
22683          when the previous instruction is not needed to compute the address.  */
22684 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22685 && !ix86_agi_dependent (dep_insn, insn))
22687           /* Claim moves to take one cycle, as the core can issue one load
22688              at a time and the next load can start a cycle later.  */
22689 if (dep_insn_type == TYPE_IMOV
22690 || dep_insn_type == TYPE_FMOV)
22699 case PROCESSOR_ATHLON:
22701 case PROCESSOR_AMDFAM10:
22702 case PROCESSOR_BDVER1:
22703 case PROCESSOR_BTVER1:
22704 case PROCESSOR_ATOM:
22705 case PROCESSOR_GENERIC32:
22706 case PROCESSOR_GENERIC64:
22707 memory = get_attr_memory (insn);
22709       /* Show the ability of the reorder buffer to hide the latency of a
22710          load by executing it in parallel with the previous instruction,
22711          when the previous instruction is not needed to compute the address.  */
22712 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22713 && !ix86_agi_dependent (dep_insn, insn))
22715 enum attr_unit unit = get_attr_unit (insn);
22718           /* Because of the difference between the length of the integer and
22719              floating unit pipeline preparation stages, the memory operands
22720              for floating point are cheaper.
22722              ??? For Athlon the difference is most probably 2.  */
22723 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
22726 loadcost = TARGET_ATHLON ? 2 : 0;
22728 if (cost >= loadcost)
22741 /* How many alternative schedules to try.  This should be as wide as the
22742    scheduling freedom in the DFA, but no wider.  Making this value too
22743    large results in extra work for the scheduler.  */
22746 ia32_multipass_dfa_lookahead (void)
22750 case PROCESSOR_PENTIUM:
22753 case PROCESSOR_PENTIUMPRO:
22757 case PROCESSOR_CORE2_32:
22758 case PROCESSOR_CORE2_64:
22759 case PROCESSOR_COREI7_32:
22760 case PROCESSOR_COREI7_64:
22761       /* Generally, we want haifa-sched:max_issue() to look ahead as far
22762          as the number of instructions that can be executed in a cycle,
22763          i.e., issue_rate.  I wonder why tuning for many CPUs does not do this.  */
22764 return ix86_issue_rate ();
22773 /* Model the decoder of Core 2/i7.
22774    The hooks below for multipass scheduling (see haifa-sched.c:max_issue)
22775    track the instruction fetch block boundaries and make sure that long
22776    (9+ byte) instructions are assigned to D0.  */
22778 /* Maximum length of an insn that can be handled by
22779 a secondary decoder unit. '8' for Core 2/i7. */
22780 static int core2i7_secondary_decoder_max_insn_size;
22782 /* Ifetch block size, i.e., the number of bytes the decoder reads per cycle.
22783    '16' for Core 2/i7.  */
22784 static int core2i7_ifetch_block_size;

22786 /* Maximum number of instructions the decoder can handle per cycle.
22787    '6' for Core 2/i7.  */
22788 static int core2i7_ifetch_block_max_insns;
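/* A rough worked example of the model (illustrative): with a 16-byte ifetch
   block and at most 6 insns per cycle, four 4-byte insns fill the block
   after only 4 insns, while six 2-byte insns hit the insn limit with 4
   bytes to spare.  A 9+ byte insn exceeds the secondary-decoder maximum of
   8 bytes, so it is only accepted on the first-cycle (D0) slot.  */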
22790 typedef struct ix86_first_cycle_multipass_data_ *
22791 ix86_first_cycle_multipass_data_t;
22792 typedef const struct ix86_first_cycle_multipass_data_ *
22793 const_ix86_first_cycle_multipass_data_t;
22795 /* A variable to store target state across calls to max_issue within
22796    one cycle.  */
22797 static struct ix86_first_cycle_multipass_data_ _ix86_first_cycle_multipass_data,
22798 *ix86_first_cycle_multipass_data = &_ix86_first_cycle_multipass_data;
22800 /* Initialize DATA. */
22802 core2i7_first_cycle_multipass_init (void *_data)
22804 ix86_first_cycle_multipass_data_t data
22805 = (ix86_first_cycle_multipass_data_t) _data;
22807 data->ifetch_block_len = 0;
22808 data->ifetch_block_n_insns = 0;
22809 data->ready_try_change = NULL;
22810 data->ready_try_change_size = 0;
22813 /* Advancing the cycle; reset ifetch block counts. */
22815 core2i7_dfa_post_advance_cycle (void)
22817 ix86_first_cycle_multipass_data_t data = ix86_first_cycle_multipass_data;
22819 gcc_assert (data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22821 data->ifetch_block_len = 0;
22822 data->ifetch_block_n_insns = 0;
22825 static int min_insn_size (rtx);
22827 /* Filter out insns from ready_try that the core will not be able to issue
22828    on the current cycle due to decoder restrictions.  */
22830 core2i7_first_cycle_multipass_filter_ready_try
22831 (const_ix86_first_cycle_multipass_data_t data,
22832 char *ready_try, int n_ready, bool first_cycle_insn_p)
22839 if (ready_try[n_ready])
22842 insn = get_ready_element (n_ready);
22843 insn_size = min_insn_size (insn);
22845       if (/* If this is too long an insn for a secondary decoder ...  */
22846 (!first_cycle_insn_p
22847 && insn_size > core2i7_secondary_decoder_max_insn_size)
22848 /* ... or it would not fit into the ifetch block ... */
22849 || data->ifetch_block_len + insn_size > core2i7_ifetch_block_size
22850 /* ... or the decoder is full already ... */
22851 || data->ifetch_block_n_insns + 1 > core2i7_ifetch_block_max_insns)
22852 /* ... mask the insn out. */
22854 ready_try[n_ready] = 1;
22856 if (data->ready_try_change)
22857 SET_BIT (data->ready_try_change, n_ready);
22862 /* Prepare for a new round of multipass lookahead scheduling. */
22864 core2i7_first_cycle_multipass_begin (void *_data, char *ready_try, int n_ready,
22865 bool first_cycle_insn_p)
22867 ix86_first_cycle_multipass_data_t data
22868 = (ix86_first_cycle_multipass_data_t) _data;
22869 const_ix86_first_cycle_multipass_data_t prev_data
22870 = ix86_first_cycle_multipass_data;
22872 /* Restore the state from the end of the previous round. */
22873 data->ifetch_block_len = prev_data->ifetch_block_len;
22874 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns;
22876   /* Filter instructions that cannot be issued on the current cycle due to
22877      decoder restrictions.  */
22878 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
22879 first_cycle_insn_p);
22882 /* INSN is being issued in the current solution.  Account for its impact on
22883 the decoder model. */
22885 core2i7_first_cycle_multipass_issue (void *_data, char *ready_try, int n_ready,
22886 rtx insn, const void *_prev_data)
22888 ix86_first_cycle_multipass_data_t data
22889 = (ix86_first_cycle_multipass_data_t) _data;
22890 const_ix86_first_cycle_multipass_data_t prev_data
22891 = (const_ix86_first_cycle_multipass_data_t) _prev_data;
22893 int insn_size = min_insn_size (insn);
22895 data->ifetch_block_len = prev_data->ifetch_block_len + insn_size;
22896 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns + 1;
22897 gcc_assert (data->ifetch_block_len <= core2i7_ifetch_block_size
22898 && data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22900 /* Allocate or resize the bitmap for storing INSN's effect on ready_try. */
22901 if (!data->ready_try_change)
22903 data->ready_try_change = sbitmap_alloc (n_ready);
22904 data->ready_try_change_size = n_ready;
22906 else if (data->ready_try_change_size < n_ready)
22908 data->ready_try_change = sbitmap_resize (data->ready_try_change,
22910 data->ready_try_change_size = n_ready;
22912 sbitmap_zero (data->ready_try_change);
22914   /* Filter out insns from ready_try that the core will not be able to issue
22915      on the current cycle due to decoder restrictions.  */
22916 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
22920 /* Revert the effect on ready_try. */
22922 core2i7_first_cycle_multipass_backtrack (const void *_data,
22924 int n_ready ATTRIBUTE_UNUSED)
22926 const_ix86_first_cycle_multipass_data_t data
22927 = (const_ix86_first_cycle_multipass_data_t) _data;
22928 unsigned int i = 0;
22929 sbitmap_iterator sbi;
22931 gcc_assert (sbitmap_last_set_bit (data->ready_try_change) < n_ready);
22932 EXECUTE_IF_SET_IN_SBITMAP (data->ready_try_change, 0, i, sbi)
22938 /* Save the result of multipass lookahead scheduling for the next round. */
22940 core2i7_first_cycle_multipass_end (const void *_data)
22942 const_ix86_first_cycle_multipass_data_t data
22943 = (const_ix86_first_cycle_multipass_data_t) _data;
22944 ix86_first_cycle_multipass_data_t next_data
22945 = ix86_first_cycle_multipass_data;
22949 next_data->ifetch_block_len = data->ifetch_block_len;
22950 next_data->ifetch_block_n_insns = data->ifetch_block_n_insns;
22954 /* Deallocate target data. */
22956 core2i7_first_cycle_multipass_fini (void *_data)
22958 ix86_first_cycle_multipass_data_t data
22959 = (ix86_first_cycle_multipass_data_t) _data;
22961 if (data->ready_try_change)
22963 sbitmap_free (data->ready_try_change);
22964 data->ready_try_change = NULL;
22965 data->ready_try_change_size = 0;
22969 /* Prepare for scheduling pass. */
22971 ix86_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
22972 int verbose ATTRIBUTE_UNUSED,
22973 int max_uid ATTRIBUTE_UNUSED)
22975   /* Install scheduling hooks for the current CPU.  Some of these hooks are used
22976 in time-critical parts of the scheduler, so we only set them up when
22977 they are actually used. */
22980 case PROCESSOR_CORE2_32:
22981 case PROCESSOR_CORE2_64:
22982 case PROCESSOR_COREI7_32:
22983 case PROCESSOR_COREI7_64:
22984 targetm.sched.dfa_post_advance_cycle
22985 = core2i7_dfa_post_advance_cycle;
22986 targetm.sched.first_cycle_multipass_init
22987 = core2i7_first_cycle_multipass_init;
22988 targetm.sched.first_cycle_multipass_begin
22989 = core2i7_first_cycle_multipass_begin;
22990 targetm.sched.first_cycle_multipass_issue
22991 = core2i7_first_cycle_multipass_issue;
22992 targetm.sched.first_cycle_multipass_backtrack
22993 = core2i7_first_cycle_multipass_backtrack;
22994 targetm.sched.first_cycle_multipass_end
22995 = core2i7_first_cycle_multipass_end;
22996 targetm.sched.first_cycle_multipass_fini
22997 = core2i7_first_cycle_multipass_fini;
22999 /* Set decoder parameters. */
23000 core2i7_secondary_decoder_max_insn_size = 8;
23001 core2i7_ifetch_block_size = 16;
23002 core2i7_ifetch_block_max_insns = 6;
23006 targetm.sched.dfa_post_advance_cycle = NULL;
23007 targetm.sched.first_cycle_multipass_init = NULL;
23008 targetm.sched.first_cycle_multipass_begin = NULL;
23009 targetm.sched.first_cycle_multipass_issue = NULL;
23010 targetm.sched.first_cycle_multipass_backtrack = NULL;
23011 targetm.sched.first_cycle_multipass_end = NULL;
23012 targetm.sched.first_cycle_multipass_fini = NULL;
23018 /* Compute the alignment given to a constant that is being placed in memory.
23019    EXP is the constant and ALIGN is the alignment that the object would
23020    ordinarily have.
23021    The value of this function is used instead of that alignment to align
23022    the object.  */
23025 ix86_constant_alignment (tree exp, int align)
23027 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
23028 || TREE_CODE (exp) == INTEGER_CST)
23030 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
23032 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
23035 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
23036 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
23037 return BITS_PER_WORD;
23042 /* Compute the alignment for a static variable.
23043 TYPE is the data type, and ALIGN is the alignment that
23044 the object would ordinarily have. The value of this function is used
23045 instead of that alignment to align the object. */
23048 ix86_data_alignment (tree type, int align)
23050 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
23052 if (AGGREGATE_TYPE_P (type)
23053 && TYPE_SIZE (type)
23054 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23055 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
23056 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
23057 && align < max_align)
23060   /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
23061      to a 16-byte boundary.  */
23064 if (AGGREGATE_TYPE_P (type)
23065 && TYPE_SIZE (type)
23066 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23067 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
23068 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
23072 if (TREE_CODE (type) == ARRAY_TYPE)
23074 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
23076 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
23079 else if (TREE_CODE (type) == COMPLEX_TYPE)
23082 if (TYPE_MODE (type) == DCmode && align < 64)
23084 if ((TYPE_MODE (type) == XCmode
23085 || TYPE_MODE (type) == TCmode) && align < 128)
23088 else if ((TREE_CODE (type) == RECORD_TYPE
23089 || TREE_CODE (type) == UNION_TYPE
23090 || TREE_CODE (type) == QUAL_UNION_TYPE)
23091 && TYPE_FIELDS (type))
23093 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
23095 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
23098 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
23099 || TREE_CODE (type) == INTEGER_TYPE)
23101 if (TYPE_MODE (type) == DFmode && align < 64)
23103 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
23110 /* Compute the alignment for a local variable or a stack slot. EXP is
23111 the data type or decl itself, MODE is the widest mode available and
23112 ALIGN is the alignment that the object would ordinarily have. The
23113    value of this macro is used instead of that alignment to align the
23114    object.  */
23117 ix86_local_alignment (tree exp, enum machine_mode mode,
23118 unsigned int align)
23122 if (exp && DECL_P (exp))
23124 type = TREE_TYPE (exp);
23133 /* Don't do dynamic stack realignment for long long objects with
23134 -mpreferred-stack-boundary=2. */
23137 && ix86_preferred_stack_boundary < 64
23138 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
23139 && (!type || !TYPE_USER_ALIGN (type))
23140 && (!decl || !DECL_USER_ALIGN (decl)))
23143   /* If TYPE is NULL, we are allocating a stack slot for a caller-save
23144      register in MODE.  We will return the largest alignment of XF
23145      and DF.  */
23148 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
23149 align = GET_MODE_ALIGNMENT (DFmode);
23153   /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
23154      to a 16-byte boundary.  The exact wording is:

23156      An array uses the same alignment as its elements, except that a local or
23157      global array variable of length at least 16 bytes or
23158      a C99 variable-length array variable always has alignment of at least 16 bytes.

23160      This was added to allow the use of aligned SSE instructions on arrays.
23161      The rule is meant for static storage (where the compiler cannot do the
23162      analysis by itself).  We follow it for automatic variables only when it
23163      is convenient.  We fully control everything in the function being
23164      compiled, and functions from other units cannot rely on the alignment.

23166      Exclude the va_list type.  It is the common case of a local array where
23167      we cannot benefit from the alignment.  */
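/* For example (illustrative): in a 64-bit function optimized for speed, a
   local "char buf[32]" falls under the rule above and the code below gives
   it at least 128-bit alignment, while "char buf[8]" keeps its natural
   alignment, and a va_list array is explicitly excluded.  */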
23168 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
23171 if (AGGREGATE_TYPE_P (type)
23172 && (va_list_type_node == NULL_TREE
23173 || (TYPE_MAIN_VARIANT (type)
23174 != TYPE_MAIN_VARIANT (va_list_type_node)))
23175 && TYPE_SIZE (type)
23176 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23177 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
23178 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
23181 if (TREE_CODE (type) == ARRAY_TYPE)
23183 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
23185 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
23188 else if (TREE_CODE (type) == COMPLEX_TYPE)
23190 if (TYPE_MODE (type) == DCmode && align < 64)
23192 if ((TYPE_MODE (type) == XCmode
23193 || TYPE_MODE (type) == TCmode) && align < 128)
23196 else if ((TREE_CODE (type) == RECORD_TYPE
23197 || TREE_CODE (type) == UNION_TYPE
23198 || TREE_CODE (type) == QUAL_UNION_TYPE)
23199 && TYPE_FIELDS (type))
23201 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
23203 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
23206 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
23207 || TREE_CODE (type) == INTEGER_TYPE)
23210 if (TYPE_MODE (type) == DFmode && align < 64)
23212 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
23218 /* Compute the minimum required alignment for dynamic stack realignment
23219 purposes for a local variable, parameter or a stack slot. EXP is
23220 the data type or decl itself, MODE is its mode and ALIGN is the
23221 alignment that the object would ordinarily have. */
23224 ix86_minimum_alignment (tree exp, enum machine_mode mode,
23225 unsigned int align)
23229 if (exp && DECL_P (exp))
23231 type = TREE_TYPE (exp);
23240 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
23243 /* Don't do dynamic stack realignment for long long objects with
23244 -mpreferred-stack-boundary=2. */
23245 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
23246 && (!type || !TYPE_USER_ALIGN (type))
23247 && (!decl || !DECL_USER_ALIGN (decl)))
23253 /* Find a location for the static chain incoming to a nested function.
23254 This is a register, unless all free registers are used by arguments. */
23257 ix86_static_chain (const_tree fndecl, bool incoming_p)
23261 if (!DECL_STATIC_CHAIN (fndecl))
23266 /* We always use R10 in 64-bit mode. */
23274 /* By default in 32-bit mode we use ECX to pass the static chain. */
23277 fntype = TREE_TYPE (fndecl);
23278 ccvt = ix86_get_callcvt (fntype);
23279 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) != 0)
23281 /* Fastcall functions use ecx/edx for arguments, which leaves
23282 us with EAX for the static chain.
23283 Thiscall functions use ecx for arguments, which also
23284 leaves us with EAX for the static chain. */
23287 else if (ix86_function_regparm (fntype, fndecl) == 3)
23289 /* For regparm 3, we have no free call-clobbered registers in
23290 which to store the static chain. In order to implement this,
23291 we have the trampoline push the static chain to the stack.
23292 However, we can't push a value below the return address when
23293 we call the nested function directly, so we have to use an
23294 alternate entry point. For this we use ESI, and have the
23295 alternate entry point push ESI, so that things appear the
23296 same once we're executing the nested function. */
23299 if (fndecl == current_function_decl)
23300 ix86_static_chain_on_stack = true;
23301 return gen_frame_mem (SImode,
23302 plus_constant (arg_pointer_rtx, -8));
23308 return gen_rtx_REG (Pmode, regno);
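/* To summarize the cases above with examples (illustrative): a 64-bit nested
   function gets its static chain in R10; a plain 32-bit function uses ECX;
   a fastcall or thiscall function uses EAX (ECX/EDX are taken by arguments);
   and with regparm(3) the chain goes on the stack via the alternate entry
   point described above.  */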
23311 /* Emit RTL insns to initialize the variable parts of a trampoline.
23312 FNDECL is the decl of the target address; M_TRAMP is a MEM for
23313 the trampoline, and CHAIN_VALUE is an RTX for the static chain
23314 to be passed to the target function. */
23317 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
23321 fnaddr = XEXP (DECL_RTL (fndecl), 0);
23328 /* Depending on the static chain location, either load a register
23329 with a constant, or push the constant to the stack. All of the
23330 instructions are the same size. */
23331 chain = ix86_static_chain (fndecl, true);
23334 if (REGNO (chain) == CX_REG)
23336 else if (REGNO (chain) == AX_REG)
23339 gcc_unreachable ();
23344 mem = adjust_address (m_tramp, QImode, 0);
23345 emit_move_insn (mem, gen_int_mode (opcode, QImode));
23347 mem = adjust_address (m_tramp, SImode, 1);
23348 emit_move_insn (mem, chain_value);
23350 /* Compute offset from the end of the jmp to the target function.
23351 In the case in which the trampoline stores the static chain on
23352 the stack, we need to skip the first insn which pushes the
23353 (call-saved) register static chain; this push is 1 byte. */
23354 disp = expand_binop (SImode, sub_optab, fnaddr,
23355 plus_constant (XEXP (m_tramp, 0),
23356 MEM_P (chain) ? 9 : 10),
23357 NULL_RTX, 1, OPTAB_DIRECT);
23359 mem = adjust_address (m_tramp, QImode, 5);
23360 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
23362 mem = adjust_address (m_tramp, SImode, 6);
23363 emit_move_insn (mem, disp);
23369       /* Load the function address into r11.  Try to load the address using
23370          the shorter movl instead of movabs.  We may want to support movq
23371          for kernel mode, but the kernel does not use trampolines at
23372          the moment.  */
23373 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
23375 fnaddr = copy_to_mode_reg (DImode, fnaddr);
23377 mem = adjust_address (m_tramp, HImode, offset);
23378 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
23380 mem = adjust_address (m_tramp, SImode, offset + 2);
23381 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
23386 mem = adjust_address (m_tramp, HImode, offset);
23387 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
23389 mem = adjust_address (m_tramp, DImode, offset + 2);
23390 emit_move_insn (mem, fnaddr);
23394 /* Load static chain using movabs to r10. */
23395 mem = adjust_address (m_tramp, HImode, offset);
23396 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
23398 mem = adjust_address (m_tramp, DImode, offset + 2);
23399 emit_move_insn (mem, chain_value);
23402 /* Jump to r11; the last (unused) byte is a nop, only there to
23403 pad the write out to a single 32-bit store. */
23404 mem = adjust_address (m_tramp, SImode, offset);
23405 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
23408 gcc_assert (offset <= TRAMPOLINE_SIZE);
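/* For reference, the 64-bit trampoline emitted above has this byte layout
   (illustrative; the movl form 41 BB <imm32> replaces the first movabs when
   the target address fits in 32 bits):
     49 BB <8-byte fnaddr>   movabs $fnaddr, %r11
     49 BA <8-byte chain>    movabs $chain,  %r10
     49 FF E3                jmp    *%r11
     90                      nop (pads the final 32-bit store)
   The 32-bit variant is a one-byte mov-immediate or push of the chain value
   followed by a relative jmp (E9 <disp32>) to the target.  */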
23411 #ifdef ENABLE_EXECUTE_STACK
23412 #ifdef CHECK_EXECUTE_STACK_ENABLED
23413 if (CHECK_EXECUTE_STACK_ENABLED)
23415 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
23416 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
23420 /* The following file contains several enumerations and data structures
23421 built from the definitions in i386-builtin-types.def. */
23423 #include "i386-builtin-types.inc"
23425 /* Table for the ix86 builtin non-function types. */
23426 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
23428 /* Retrieve an element from the above table, building some of
23429 the types lazily. */
23432 ix86_get_builtin_type (enum ix86_builtin_type tcode)
23434 unsigned int index;
23437 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
23439 type = ix86_builtin_type_tab[(int) tcode];
23443 gcc_assert (tcode > IX86_BT_LAST_PRIM);
23444 if (tcode <= IX86_BT_LAST_VECT)
23446 enum machine_mode mode;
23448 index = tcode - IX86_BT_LAST_PRIM - 1;
23449 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
23450 mode = ix86_builtin_type_vect_mode[index];
23452 type = build_vector_type_for_mode (itype, mode);
23458 index = tcode - IX86_BT_LAST_VECT - 1;
23459 if (tcode <= IX86_BT_LAST_PTR)
23460 quals = TYPE_UNQUALIFIED;
23462 quals = TYPE_QUAL_CONST;
23464 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
23465 if (quals != TYPE_UNQUALIFIED)
23466 itype = build_qualified_type (itype, quals);
23468 type = build_pointer_type (itype);
23471 ix86_builtin_type_tab[(int) tcode] = type;
23475 /* Table for the ix86 builtin function types. */
23476 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
23478 /* Retrieve an element from the above table, building some of
23479 the types lazily. */
23482 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
23486 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
23488 type = ix86_builtin_func_type_tab[(int) tcode];
23492 if (tcode <= IX86_BT_LAST_FUNC)
23494 unsigned start = ix86_builtin_func_start[(int) tcode];
23495 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
23496 tree rtype, atype, args = void_list_node;
23499 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
23500 for (i = after - 1; i > start; --i)
23502 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
23503 args = tree_cons (NULL, atype, args);
23506 type = build_function_type (rtype, args);
23510 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
23511 enum ix86_builtin_func_type icode;
23513 icode = ix86_builtin_func_alias_base[index];
23514 type = ix86_get_builtin_func_type (icode);
23517 ix86_builtin_func_type_tab[(int) tcode] = type;
23522 /* Codes for all the SSE/MMX builtins. */
23525 IX86_BUILTIN_ADDPS,
23526 IX86_BUILTIN_ADDSS,
23527 IX86_BUILTIN_DIVPS,
23528 IX86_BUILTIN_DIVSS,
23529 IX86_BUILTIN_MULPS,
23530 IX86_BUILTIN_MULSS,
23531 IX86_BUILTIN_SUBPS,
23532 IX86_BUILTIN_SUBSS,
23534 IX86_BUILTIN_CMPEQPS,
23535 IX86_BUILTIN_CMPLTPS,
23536 IX86_BUILTIN_CMPLEPS,
23537 IX86_BUILTIN_CMPGTPS,
23538 IX86_BUILTIN_CMPGEPS,
23539 IX86_BUILTIN_CMPNEQPS,
23540 IX86_BUILTIN_CMPNLTPS,
23541 IX86_BUILTIN_CMPNLEPS,
23542 IX86_BUILTIN_CMPNGTPS,
23543 IX86_BUILTIN_CMPNGEPS,
23544 IX86_BUILTIN_CMPORDPS,
23545 IX86_BUILTIN_CMPUNORDPS,
23546 IX86_BUILTIN_CMPEQSS,
23547 IX86_BUILTIN_CMPLTSS,
23548 IX86_BUILTIN_CMPLESS,
23549 IX86_BUILTIN_CMPNEQSS,
23550 IX86_BUILTIN_CMPNLTSS,
23551 IX86_BUILTIN_CMPNLESS,
23552 IX86_BUILTIN_CMPNGTSS,
23553 IX86_BUILTIN_CMPNGESS,
23554 IX86_BUILTIN_CMPORDSS,
23555 IX86_BUILTIN_CMPUNORDSS,
23557 IX86_BUILTIN_COMIEQSS,
23558 IX86_BUILTIN_COMILTSS,
23559 IX86_BUILTIN_COMILESS,
23560 IX86_BUILTIN_COMIGTSS,
23561 IX86_BUILTIN_COMIGESS,
23562 IX86_BUILTIN_COMINEQSS,
23563 IX86_BUILTIN_UCOMIEQSS,
23564 IX86_BUILTIN_UCOMILTSS,
23565 IX86_BUILTIN_UCOMILESS,
23566 IX86_BUILTIN_UCOMIGTSS,
23567 IX86_BUILTIN_UCOMIGESS,
23568 IX86_BUILTIN_UCOMINEQSS,
23570 IX86_BUILTIN_CVTPI2PS,
23571 IX86_BUILTIN_CVTPS2PI,
23572 IX86_BUILTIN_CVTSI2SS,
23573 IX86_BUILTIN_CVTSI642SS,
23574 IX86_BUILTIN_CVTSS2SI,
23575 IX86_BUILTIN_CVTSS2SI64,
23576 IX86_BUILTIN_CVTTPS2PI,
23577 IX86_BUILTIN_CVTTSS2SI,
23578 IX86_BUILTIN_CVTTSS2SI64,
23580 IX86_BUILTIN_MAXPS,
23581 IX86_BUILTIN_MAXSS,
23582 IX86_BUILTIN_MINPS,
23583 IX86_BUILTIN_MINSS,
23585 IX86_BUILTIN_LOADUPS,
23586 IX86_BUILTIN_STOREUPS,
23587 IX86_BUILTIN_MOVSS,
23589 IX86_BUILTIN_MOVHLPS,
23590 IX86_BUILTIN_MOVLHPS,
23591 IX86_BUILTIN_LOADHPS,
23592 IX86_BUILTIN_LOADLPS,
23593 IX86_BUILTIN_STOREHPS,
23594 IX86_BUILTIN_STORELPS,
23596 IX86_BUILTIN_MASKMOVQ,
23597 IX86_BUILTIN_MOVMSKPS,
23598 IX86_BUILTIN_PMOVMSKB,
23600 IX86_BUILTIN_MOVNTPS,
23601 IX86_BUILTIN_MOVNTQ,
23603 IX86_BUILTIN_LOADDQU,
23604 IX86_BUILTIN_STOREDQU,
23606 IX86_BUILTIN_PACKSSWB,
23607 IX86_BUILTIN_PACKSSDW,
23608 IX86_BUILTIN_PACKUSWB,
23610 IX86_BUILTIN_PADDB,
23611 IX86_BUILTIN_PADDW,
23612 IX86_BUILTIN_PADDD,
23613 IX86_BUILTIN_PADDQ,
23614 IX86_BUILTIN_PADDSB,
23615 IX86_BUILTIN_PADDSW,
23616 IX86_BUILTIN_PADDUSB,
23617 IX86_BUILTIN_PADDUSW,
23618 IX86_BUILTIN_PSUBB,
23619 IX86_BUILTIN_PSUBW,
23620 IX86_BUILTIN_PSUBD,
23621 IX86_BUILTIN_PSUBQ,
23622 IX86_BUILTIN_PSUBSB,
23623 IX86_BUILTIN_PSUBSW,
23624 IX86_BUILTIN_PSUBUSB,
23625 IX86_BUILTIN_PSUBUSW,
23628 IX86_BUILTIN_PANDN,
23632 IX86_BUILTIN_PAVGB,
23633 IX86_BUILTIN_PAVGW,
23635 IX86_BUILTIN_PCMPEQB,
23636 IX86_BUILTIN_PCMPEQW,
23637 IX86_BUILTIN_PCMPEQD,
23638 IX86_BUILTIN_PCMPGTB,
23639 IX86_BUILTIN_PCMPGTW,
23640 IX86_BUILTIN_PCMPGTD,
23642 IX86_BUILTIN_PMADDWD,
23644 IX86_BUILTIN_PMAXSW,
23645 IX86_BUILTIN_PMAXUB,
23646 IX86_BUILTIN_PMINSW,
23647 IX86_BUILTIN_PMINUB,
23649 IX86_BUILTIN_PMULHUW,
23650 IX86_BUILTIN_PMULHW,
23651 IX86_BUILTIN_PMULLW,
23653 IX86_BUILTIN_PSADBW,
23654 IX86_BUILTIN_PSHUFW,
23656 IX86_BUILTIN_PSLLW,
23657 IX86_BUILTIN_PSLLD,
23658 IX86_BUILTIN_PSLLQ,
23659 IX86_BUILTIN_PSRAW,
23660 IX86_BUILTIN_PSRAD,
23661 IX86_BUILTIN_PSRLW,
23662 IX86_BUILTIN_PSRLD,
23663 IX86_BUILTIN_PSRLQ,
23664 IX86_BUILTIN_PSLLWI,
23665 IX86_BUILTIN_PSLLDI,
23666 IX86_BUILTIN_PSLLQI,
23667 IX86_BUILTIN_PSRAWI,
23668 IX86_BUILTIN_PSRADI,
23669 IX86_BUILTIN_PSRLWI,
23670 IX86_BUILTIN_PSRLDI,
23671 IX86_BUILTIN_PSRLQI,
23673 IX86_BUILTIN_PUNPCKHBW,
23674 IX86_BUILTIN_PUNPCKHWD,
23675 IX86_BUILTIN_PUNPCKHDQ,
23676 IX86_BUILTIN_PUNPCKLBW,
23677 IX86_BUILTIN_PUNPCKLWD,
23678 IX86_BUILTIN_PUNPCKLDQ,
23680 IX86_BUILTIN_SHUFPS,
23682 IX86_BUILTIN_RCPPS,
23683 IX86_BUILTIN_RCPSS,
23684 IX86_BUILTIN_RSQRTPS,
23685 IX86_BUILTIN_RSQRTPS_NR,
23686 IX86_BUILTIN_RSQRTSS,
23687 IX86_BUILTIN_RSQRTF,
23688 IX86_BUILTIN_SQRTPS,
23689 IX86_BUILTIN_SQRTPS_NR,
23690 IX86_BUILTIN_SQRTSS,
23692 IX86_BUILTIN_UNPCKHPS,
23693 IX86_BUILTIN_UNPCKLPS,
23695 IX86_BUILTIN_ANDPS,
23696 IX86_BUILTIN_ANDNPS,
23698 IX86_BUILTIN_XORPS,
23701 IX86_BUILTIN_LDMXCSR,
23702 IX86_BUILTIN_STMXCSR,
23703 IX86_BUILTIN_SFENCE,
23705 /* 3DNow! Original */
23706 IX86_BUILTIN_FEMMS,
23707 IX86_BUILTIN_PAVGUSB,
23708 IX86_BUILTIN_PF2ID,
23709 IX86_BUILTIN_PFACC,
23710 IX86_BUILTIN_PFADD,
23711 IX86_BUILTIN_PFCMPEQ,
23712 IX86_BUILTIN_PFCMPGE,
23713 IX86_BUILTIN_PFCMPGT,
23714 IX86_BUILTIN_PFMAX,
23715 IX86_BUILTIN_PFMIN,
23716 IX86_BUILTIN_PFMUL,
23717 IX86_BUILTIN_PFRCP,
23718 IX86_BUILTIN_PFRCPIT1,
23719 IX86_BUILTIN_PFRCPIT2,
23720 IX86_BUILTIN_PFRSQIT1,
23721 IX86_BUILTIN_PFRSQRT,
23722 IX86_BUILTIN_PFSUB,
23723 IX86_BUILTIN_PFSUBR,
23724 IX86_BUILTIN_PI2FD,
23725 IX86_BUILTIN_PMULHRW,
23727 /* 3DNow! Athlon Extensions */
23728 IX86_BUILTIN_PF2IW,
23729 IX86_BUILTIN_PFNACC,
23730 IX86_BUILTIN_PFPNACC,
23731 IX86_BUILTIN_PI2FW,
23732 IX86_BUILTIN_PSWAPDSI,
23733 IX86_BUILTIN_PSWAPDSF,
23736 IX86_BUILTIN_ADDPD,
23737 IX86_BUILTIN_ADDSD,
23738 IX86_BUILTIN_DIVPD,
23739 IX86_BUILTIN_DIVSD,
23740 IX86_BUILTIN_MULPD,
23741 IX86_BUILTIN_MULSD,
23742 IX86_BUILTIN_SUBPD,
23743 IX86_BUILTIN_SUBSD,
23745 IX86_BUILTIN_CMPEQPD,
23746 IX86_BUILTIN_CMPLTPD,
23747 IX86_BUILTIN_CMPLEPD,
23748 IX86_BUILTIN_CMPGTPD,
23749 IX86_BUILTIN_CMPGEPD,
23750 IX86_BUILTIN_CMPNEQPD,
23751 IX86_BUILTIN_CMPNLTPD,
23752 IX86_BUILTIN_CMPNLEPD,
23753 IX86_BUILTIN_CMPNGTPD,
23754 IX86_BUILTIN_CMPNGEPD,
23755 IX86_BUILTIN_CMPORDPD,
23756 IX86_BUILTIN_CMPUNORDPD,
23757 IX86_BUILTIN_CMPEQSD,
23758 IX86_BUILTIN_CMPLTSD,
23759 IX86_BUILTIN_CMPLESD,
23760 IX86_BUILTIN_CMPNEQSD,
23761 IX86_BUILTIN_CMPNLTSD,
23762 IX86_BUILTIN_CMPNLESD,
23763 IX86_BUILTIN_CMPORDSD,
23764 IX86_BUILTIN_CMPUNORDSD,
23766 IX86_BUILTIN_COMIEQSD,
23767 IX86_BUILTIN_COMILTSD,
23768 IX86_BUILTIN_COMILESD,
23769 IX86_BUILTIN_COMIGTSD,
23770 IX86_BUILTIN_COMIGESD,
23771 IX86_BUILTIN_COMINEQSD,
23772 IX86_BUILTIN_UCOMIEQSD,
23773 IX86_BUILTIN_UCOMILTSD,
23774 IX86_BUILTIN_UCOMILESD,
23775 IX86_BUILTIN_UCOMIGTSD,
23776 IX86_BUILTIN_UCOMIGESD,
23777 IX86_BUILTIN_UCOMINEQSD,
23779 IX86_BUILTIN_MAXPD,
23780 IX86_BUILTIN_MAXSD,
23781 IX86_BUILTIN_MINPD,
23782 IX86_BUILTIN_MINSD,
23784 IX86_BUILTIN_ANDPD,
23785 IX86_BUILTIN_ANDNPD,
23787 IX86_BUILTIN_XORPD,
23789 IX86_BUILTIN_SQRTPD,
23790 IX86_BUILTIN_SQRTSD,
23792 IX86_BUILTIN_UNPCKHPD,
23793 IX86_BUILTIN_UNPCKLPD,
23795 IX86_BUILTIN_SHUFPD,
23797 IX86_BUILTIN_LOADUPD,
23798 IX86_BUILTIN_STOREUPD,
23799 IX86_BUILTIN_MOVSD,
23801 IX86_BUILTIN_LOADHPD,
23802 IX86_BUILTIN_LOADLPD,
23804 IX86_BUILTIN_CVTDQ2PD,
23805 IX86_BUILTIN_CVTDQ2PS,
23807 IX86_BUILTIN_CVTPD2DQ,
23808 IX86_BUILTIN_CVTPD2PI,
23809 IX86_BUILTIN_CVTPD2PS,
23810 IX86_BUILTIN_CVTTPD2DQ,
23811 IX86_BUILTIN_CVTTPD2PI,
23813 IX86_BUILTIN_CVTPI2PD,
23814 IX86_BUILTIN_CVTSI2SD,
23815 IX86_BUILTIN_CVTSI642SD,
23817 IX86_BUILTIN_CVTSD2SI,
23818 IX86_BUILTIN_CVTSD2SI64,
23819 IX86_BUILTIN_CVTSD2SS,
23820 IX86_BUILTIN_CVTSS2SD,
23821 IX86_BUILTIN_CVTTSD2SI,
23822 IX86_BUILTIN_CVTTSD2SI64,
23824 IX86_BUILTIN_CVTPS2DQ,
23825 IX86_BUILTIN_CVTPS2PD,
23826 IX86_BUILTIN_CVTTPS2DQ,
23828 IX86_BUILTIN_MOVNTI,
23829 IX86_BUILTIN_MOVNTPD,
23830 IX86_BUILTIN_MOVNTDQ,
23832 IX86_BUILTIN_MOVQ128,
23835 IX86_BUILTIN_MASKMOVDQU,
23836 IX86_BUILTIN_MOVMSKPD,
23837 IX86_BUILTIN_PMOVMSKB128,
23839 IX86_BUILTIN_PACKSSWB128,
23840 IX86_BUILTIN_PACKSSDW128,
23841 IX86_BUILTIN_PACKUSWB128,
23843 IX86_BUILTIN_PADDB128,
23844 IX86_BUILTIN_PADDW128,
23845 IX86_BUILTIN_PADDD128,
23846 IX86_BUILTIN_PADDQ128,
23847 IX86_BUILTIN_PADDSB128,
23848 IX86_BUILTIN_PADDSW128,
23849 IX86_BUILTIN_PADDUSB128,
23850 IX86_BUILTIN_PADDUSW128,
23851 IX86_BUILTIN_PSUBB128,
23852 IX86_BUILTIN_PSUBW128,
23853 IX86_BUILTIN_PSUBD128,
23854 IX86_BUILTIN_PSUBQ128,
23855 IX86_BUILTIN_PSUBSB128,
23856 IX86_BUILTIN_PSUBSW128,
23857 IX86_BUILTIN_PSUBUSB128,
23858 IX86_BUILTIN_PSUBUSW128,
23860 IX86_BUILTIN_PAND128,
23861 IX86_BUILTIN_PANDN128,
23862 IX86_BUILTIN_POR128,
23863 IX86_BUILTIN_PXOR128,
23865 IX86_BUILTIN_PAVGB128,
23866 IX86_BUILTIN_PAVGW128,
23868 IX86_BUILTIN_PCMPEQB128,
23869 IX86_BUILTIN_PCMPEQW128,
23870 IX86_BUILTIN_PCMPEQD128,
23871 IX86_BUILTIN_PCMPGTB128,
23872 IX86_BUILTIN_PCMPGTW128,
23873 IX86_BUILTIN_PCMPGTD128,
23875 IX86_BUILTIN_PMADDWD128,
23877 IX86_BUILTIN_PMAXSW128,
23878 IX86_BUILTIN_PMAXUB128,
23879 IX86_BUILTIN_PMINSW128,
23880 IX86_BUILTIN_PMINUB128,
23882 IX86_BUILTIN_PMULUDQ,
23883 IX86_BUILTIN_PMULUDQ128,
23884 IX86_BUILTIN_PMULHUW128,
23885 IX86_BUILTIN_PMULHW128,
23886 IX86_BUILTIN_PMULLW128,
23888 IX86_BUILTIN_PSADBW128,
23889 IX86_BUILTIN_PSHUFHW,
23890 IX86_BUILTIN_PSHUFLW,
23891 IX86_BUILTIN_PSHUFD,
23893 IX86_BUILTIN_PSLLDQI128,
23894 IX86_BUILTIN_PSLLWI128,
23895 IX86_BUILTIN_PSLLDI128,
23896 IX86_BUILTIN_PSLLQI128,
23897 IX86_BUILTIN_PSRAWI128,
23898 IX86_BUILTIN_PSRADI128,
23899 IX86_BUILTIN_PSRLDQI128,
23900 IX86_BUILTIN_PSRLWI128,
23901 IX86_BUILTIN_PSRLDI128,
23902 IX86_BUILTIN_PSRLQI128,
23904 IX86_BUILTIN_PSLLDQ128,
23905 IX86_BUILTIN_PSLLW128,
23906 IX86_BUILTIN_PSLLD128,
23907 IX86_BUILTIN_PSLLQ128,
23908 IX86_BUILTIN_PSRAW128,
23909 IX86_BUILTIN_PSRAD128,
23910 IX86_BUILTIN_PSRLW128,
23911 IX86_BUILTIN_PSRLD128,
23912 IX86_BUILTIN_PSRLQ128,
23914 IX86_BUILTIN_PUNPCKHBW128,
23915 IX86_BUILTIN_PUNPCKHWD128,
23916 IX86_BUILTIN_PUNPCKHDQ128,
23917 IX86_BUILTIN_PUNPCKHQDQ128,
23918 IX86_BUILTIN_PUNPCKLBW128,
23919 IX86_BUILTIN_PUNPCKLWD128,
23920 IX86_BUILTIN_PUNPCKLDQ128,
23921 IX86_BUILTIN_PUNPCKLQDQ128,
23923 IX86_BUILTIN_CLFLUSH,
23924 IX86_BUILTIN_MFENCE,
23925 IX86_BUILTIN_LFENCE,
23927 IX86_BUILTIN_BSRSI,
23928 IX86_BUILTIN_BSRDI,
23929 IX86_BUILTIN_RDPMC,
23930 IX86_BUILTIN_RDTSC,
23931 IX86_BUILTIN_RDTSCP,
23932 IX86_BUILTIN_ROLQI,
23933 IX86_BUILTIN_ROLHI,
23934 IX86_BUILTIN_RORQI,
23935 IX86_BUILTIN_RORHI,
23938 IX86_BUILTIN_ADDSUBPS,
23939 IX86_BUILTIN_HADDPS,
23940 IX86_BUILTIN_HSUBPS,
23941 IX86_BUILTIN_MOVSHDUP,
23942 IX86_BUILTIN_MOVSLDUP,
23943 IX86_BUILTIN_ADDSUBPD,
23944 IX86_BUILTIN_HADDPD,
23945 IX86_BUILTIN_HSUBPD,
23946 IX86_BUILTIN_LDDQU,
23948 IX86_BUILTIN_MONITOR,
23949 IX86_BUILTIN_MWAIT,
23952 IX86_BUILTIN_PHADDW,
23953 IX86_BUILTIN_PHADDD,
23954 IX86_BUILTIN_PHADDSW,
23955 IX86_BUILTIN_PHSUBW,
23956 IX86_BUILTIN_PHSUBD,
23957 IX86_BUILTIN_PHSUBSW,
23958 IX86_BUILTIN_PMADDUBSW,
23959 IX86_BUILTIN_PMULHRSW,
23960 IX86_BUILTIN_PSHUFB,
23961 IX86_BUILTIN_PSIGNB,
23962 IX86_BUILTIN_PSIGNW,
23963 IX86_BUILTIN_PSIGND,
23964 IX86_BUILTIN_PALIGNR,
23965 IX86_BUILTIN_PABSB,
23966 IX86_BUILTIN_PABSW,
23967 IX86_BUILTIN_PABSD,
23969 IX86_BUILTIN_PHADDW128,
23970 IX86_BUILTIN_PHADDD128,
23971 IX86_BUILTIN_PHADDSW128,
23972 IX86_BUILTIN_PHSUBW128,
23973 IX86_BUILTIN_PHSUBD128,
23974 IX86_BUILTIN_PHSUBSW128,
23975 IX86_BUILTIN_PMADDUBSW128,
23976 IX86_BUILTIN_PMULHRSW128,
23977 IX86_BUILTIN_PSHUFB128,
23978 IX86_BUILTIN_PSIGNB128,
23979 IX86_BUILTIN_PSIGNW128,
23980 IX86_BUILTIN_PSIGND128,
23981 IX86_BUILTIN_PALIGNR128,
23982 IX86_BUILTIN_PABSB128,
23983 IX86_BUILTIN_PABSW128,
23984 IX86_BUILTIN_PABSD128,
23986 /* AMDFAM10 - SSE4A New Instructions. */
23987 IX86_BUILTIN_MOVNTSD,
23988 IX86_BUILTIN_MOVNTSS,
23989 IX86_BUILTIN_EXTRQI,
23990 IX86_BUILTIN_EXTRQ,
23991 IX86_BUILTIN_INSERTQI,
23992 IX86_BUILTIN_INSERTQ,
23995 IX86_BUILTIN_BLENDPD,
23996 IX86_BUILTIN_BLENDPS,
23997 IX86_BUILTIN_BLENDVPD,
23998 IX86_BUILTIN_BLENDVPS,
23999 IX86_BUILTIN_PBLENDVB128,
24000 IX86_BUILTIN_PBLENDW128,
24005 IX86_BUILTIN_INSERTPS128,
24007 IX86_BUILTIN_MOVNTDQA,
24008 IX86_BUILTIN_MPSADBW128,
24009 IX86_BUILTIN_PACKUSDW128,
24010 IX86_BUILTIN_PCMPEQQ,
24011 IX86_BUILTIN_PHMINPOSUW128,
24013 IX86_BUILTIN_PMAXSB128,
24014 IX86_BUILTIN_PMAXSD128,
24015 IX86_BUILTIN_PMAXUD128,
24016 IX86_BUILTIN_PMAXUW128,
24018 IX86_BUILTIN_PMINSB128,
24019 IX86_BUILTIN_PMINSD128,
24020 IX86_BUILTIN_PMINUD128,
24021 IX86_BUILTIN_PMINUW128,
24023 IX86_BUILTIN_PMOVSXBW128,
24024 IX86_BUILTIN_PMOVSXBD128,
24025 IX86_BUILTIN_PMOVSXBQ128,
24026 IX86_BUILTIN_PMOVSXWD128,
24027 IX86_BUILTIN_PMOVSXWQ128,
24028 IX86_BUILTIN_PMOVSXDQ128,
24030 IX86_BUILTIN_PMOVZXBW128,
24031 IX86_BUILTIN_PMOVZXBD128,
24032 IX86_BUILTIN_PMOVZXBQ128,
24033 IX86_BUILTIN_PMOVZXWD128,
24034 IX86_BUILTIN_PMOVZXWQ128,
24035 IX86_BUILTIN_PMOVZXDQ128,
24037 IX86_BUILTIN_PMULDQ128,
24038 IX86_BUILTIN_PMULLD128,
24040 IX86_BUILTIN_ROUNDPD,
24041 IX86_BUILTIN_ROUNDPS,
24042 IX86_BUILTIN_ROUNDSD,
24043 IX86_BUILTIN_ROUNDSS,
24045 IX86_BUILTIN_FLOORPD,
24046 IX86_BUILTIN_CEILPD,
24047 IX86_BUILTIN_TRUNCPD,
24048 IX86_BUILTIN_RINTPD,
24049 IX86_BUILTIN_FLOORPS,
24050 IX86_BUILTIN_CEILPS,
24051 IX86_BUILTIN_TRUNCPS,
24052 IX86_BUILTIN_RINTPS,
24054 IX86_BUILTIN_PTESTZ,
24055 IX86_BUILTIN_PTESTC,
24056 IX86_BUILTIN_PTESTNZC,
24058 IX86_BUILTIN_VEC_INIT_V2SI,
24059 IX86_BUILTIN_VEC_INIT_V4HI,
24060 IX86_BUILTIN_VEC_INIT_V8QI,
24061 IX86_BUILTIN_VEC_EXT_V2DF,
24062 IX86_BUILTIN_VEC_EXT_V2DI,
24063 IX86_BUILTIN_VEC_EXT_V4SF,
24064 IX86_BUILTIN_VEC_EXT_V4SI,
24065 IX86_BUILTIN_VEC_EXT_V8HI,
24066 IX86_BUILTIN_VEC_EXT_V2SI,
24067 IX86_BUILTIN_VEC_EXT_V4HI,
24068 IX86_BUILTIN_VEC_EXT_V16QI,
24069 IX86_BUILTIN_VEC_SET_V2DI,
24070 IX86_BUILTIN_VEC_SET_V4SF,
24071 IX86_BUILTIN_VEC_SET_V4SI,
24072 IX86_BUILTIN_VEC_SET_V8HI,
24073 IX86_BUILTIN_VEC_SET_V4HI,
24074 IX86_BUILTIN_VEC_SET_V16QI,
24076 IX86_BUILTIN_VEC_PACK_SFIX,
24079 IX86_BUILTIN_CRC32QI,
24080 IX86_BUILTIN_CRC32HI,
24081 IX86_BUILTIN_CRC32SI,
24082 IX86_BUILTIN_CRC32DI,
24084 IX86_BUILTIN_PCMPESTRI128,
24085 IX86_BUILTIN_PCMPESTRM128,
24086 IX86_BUILTIN_PCMPESTRA128,
24087 IX86_BUILTIN_PCMPESTRC128,
24088 IX86_BUILTIN_PCMPESTRO128,
24089 IX86_BUILTIN_PCMPESTRS128,
24090 IX86_BUILTIN_PCMPESTRZ128,
24091 IX86_BUILTIN_PCMPISTRI128,
24092 IX86_BUILTIN_PCMPISTRM128,
24093 IX86_BUILTIN_PCMPISTRA128,
24094 IX86_BUILTIN_PCMPISTRC128,
24095 IX86_BUILTIN_PCMPISTRO128,
24096 IX86_BUILTIN_PCMPISTRS128,
24097 IX86_BUILTIN_PCMPISTRZ128,
24099 IX86_BUILTIN_PCMPGTQ,
24101 /* AES instructions */
24102 IX86_BUILTIN_AESENC128,
24103 IX86_BUILTIN_AESENCLAST128,
24104 IX86_BUILTIN_AESDEC128,
24105 IX86_BUILTIN_AESDECLAST128,
24106 IX86_BUILTIN_AESIMC128,
24107 IX86_BUILTIN_AESKEYGENASSIST128,
24109 /* PCLMUL instruction */
24110 IX86_BUILTIN_PCLMULQDQ128,
  IX86_BUILTIN_ADDPD256,
  IX86_BUILTIN_ADDPS256,
  IX86_BUILTIN_ADDSUBPD256,
  IX86_BUILTIN_ADDSUBPS256,
  IX86_BUILTIN_ANDPD256,
  IX86_BUILTIN_ANDPS256,
  IX86_BUILTIN_ANDNPD256,
  IX86_BUILTIN_ANDNPS256,
  IX86_BUILTIN_BLENDPD256,
  IX86_BUILTIN_BLENDPS256,
  IX86_BUILTIN_BLENDVPD256,
  IX86_BUILTIN_BLENDVPS256,
  IX86_BUILTIN_DIVPD256,
  IX86_BUILTIN_DIVPS256,
  IX86_BUILTIN_DPPS256,
  IX86_BUILTIN_HADDPD256,
  IX86_BUILTIN_HADDPS256,
  IX86_BUILTIN_HSUBPD256,
  IX86_BUILTIN_HSUBPS256,
  IX86_BUILTIN_MAXPD256,
  IX86_BUILTIN_MAXPS256,
  IX86_BUILTIN_MINPD256,
  IX86_BUILTIN_MINPS256,
  IX86_BUILTIN_MULPD256,
  IX86_BUILTIN_MULPS256,
  IX86_BUILTIN_ORPD256,
  IX86_BUILTIN_ORPS256,
  IX86_BUILTIN_SHUFPD256,
  IX86_BUILTIN_SHUFPS256,
  IX86_BUILTIN_SUBPD256,
  IX86_BUILTIN_SUBPS256,
  IX86_BUILTIN_XORPD256,
  IX86_BUILTIN_XORPS256,
  IX86_BUILTIN_CMPSD,
  IX86_BUILTIN_CMPSS,
  IX86_BUILTIN_CMPPD,
  IX86_BUILTIN_CMPPS,
  IX86_BUILTIN_CMPPD256,
  IX86_BUILTIN_CMPPS256,
  IX86_BUILTIN_CVTDQ2PD256,
  IX86_BUILTIN_CVTDQ2PS256,
  IX86_BUILTIN_CVTPD2PS256,
  IX86_BUILTIN_CVTPS2DQ256,
  IX86_BUILTIN_CVTPS2PD256,
  IX86_BUILTIN_CVTTPD2DQ256,
  IX86_BUILTIN_CVTPD2DQ256,
  IX86_BUILTIN_CVTTPS2DQ256,
  IX86_BUILTIN_EXTRACTF128PD256,
  IX86_BUILTIN_EXTRACTF128PS256,
  IX86_BUILTIN_EXTRACTF128SI256,
  IX86_BUILTIN_VZEROALL,
  IX86_BUILTIN_VZEROUPPER,
  IX86_BUILTIN_VPERMILVARPD,
  IX86_BUILTIN_VPERMILVARPS,
  IX86_BUILTIN_VPERMILVARPD256,
  IX86_BUILTIN_VPERMILVARPS256,
  IX86_BUILTIN_VPERMILPD,
  IX86_BUILTIN_VPERMILPS,
  IX86_BUILTIN_VPERMILPD256,
  IX86_BUILTIN_VPERMILPS256,
  IX86_BUILTIN_VPERMIL2PD,
  IX86_BUILTIN_VPERMIL2PS,
  IX86_BUILTIN_VPERMIL2PD256,
  IX86_BUILTIN_VPERMIL2PS256,
  IX86_BUILTIN_VPERM2F128PD256,
  IX86_BUILTIN_VPERM2F128PS256,
  IX86_BUILTIN_VPERM2F128SI256,
  IX86_BUILTIN_VBROADCASTSS,
  IX86_BUILTIN_VBROADCASTSD256,
  IX86_BUILTIN_VBROADCASTSS256,
  IX86_BUILTIN_VBROADCASTPD256,
  IX86_BUILTIN_VBROADCASTPS256,
  IX86_BUILTIN_VINSERTF128PD256,
  IX86_BUILTIN_VINSERTF128PS256,
  IX86_BUILTIN_VINSERTF128SI256,
  IX86_BUILTIN_LOADUPD256,
  IX86_BUILTIN_LOADUPS256,
  IX86_BUILTIN_STOREUPD256,
  IX86_BUILTIN_STOREUPS256,
  IX86_BUILTIN_LDDQU256,
  IX86_BUILTIN_MOVNTDQ256,
  IX86_BUILTIN_MOVNTPD256,
  IX86_BUILTIN_MOVNTPS256,
  IX86_BUILTIN_LOADDQU256,
  IX86_BUILTIN_STOREDQU256,
  IX86_BUILTIN_MASKLOADPD,
  IX86_BUILTIN_MASKLOADPS,
  IX86_BUILTIN_MASKSTOREPD,
  IX86_BUILTIN_MASKSTOREPS,
  IX86_BUILTIN_MASKLOADPD256,
  IX86_BUILTIN_MASKLOADPS256,
  IX86_BUILTIN_MASKSTOREPD256,
  IX86_BUILTIN_MASKSTOREPS256,
  IX86_BUILTIN_MOVSHDUP256,
  IX86_BUILTIN_MOVSLDUP256,
  IX86_BUILTIN_MOVDDUP256,

  IX86_BUILTIN_SQRTPD256,
  IX86_BUILTIN_SQRTPS256,
  IX86_BUILTIN_SQRTPS_NR256,
  IX86_BUILTIN_RSQRTPS256,
  IX86_BUILTIN_RSQRTPS_NR256,

  IX86_BUILTIN_RCPPS256,

  IX86_BUILTIN_ROUNDPD256,
  IX86_BUILTIN_ROUNDPS256,

  IX86_BUILTIN_FLOORPD256,
  IX86_BUILTIN_CEILPD256,
  IX86_BUILTIN_TRUNCPD256,
  IX86_BUILTIN_RINTPD256,
  IX86_BUILTIN_FLOORPS256,
  IX86_BUILTIN_CEILPS256,
  IX86_BUILTIN_TRUNCPS256,
  IX86_BUILTIN_RINTPS256,

  IX86_BUILTIN_UNPCKHPD256,
  IX86_BUILTIN_UNPCKLPD256,
  IX86_BUILTIN_UNPCKHPS256,
  IX86_BUILTIN_UNPCKLPS256,

  IX86_BUILTIN_SI256_SI,
  IX86_BUILTIN_PS256_PS,
  IX86_BUILTIN_PD256_PD,
  IX86_BUILTIN_SI_SI256,
  IX86_BUILTIN_PS_PS256,
  IX86_BUILTIN_PD_PD256,

  IX86_BUILTIN_VTESTZPD,
  IX86_BUILTIN_VTESTCPD,
  IX86_BUILTIN_VTESTNZCPD,
  IX86_BUILTIN_VTESTZPS,
  IX86_BUILTIN_VTESTCPS,
  IX86_BUILTIN_VTESTNZCPS,
  IX86_BUILTIN_VTESTZPD256,
  IX86_BUILTIN_VTESTCPD256,
  IX86_BUILTIN_VTESTNZCPD256,
  IX86_BUILTIN_VTESTZPS256,
  IX86_BUILTIN_VTESTCPS256,
  IX86_BUILTIN_VTESTNZCPS256,
  IX86_BUILTIN_PTESTZ256,
  IX86_BUILTIN_PTESTC256,
  IX86_BUILTIN_PTESTNZC256,

  IX86_BUILTIN_MOVMSKPD256,
  IX86_BUILTIN_MOVMSKPS256,
  /* TFmode support builtins.  */
  IX86_BUILTIN_HUGE_VALQ,
  IX86_BUILTIN_FABSQ,
  IX86_BUILTIN_COPYSIGNQ,

  /* Vectorizer support builtins.  */
  IX86_BUILTIN_CPYSGNPS,
  IX86_BUILTIN_CPYSGNPD,
  IX86_BUILTIN_CPYSGNPS256,
  IX86_BUILTIN_CPYSGNPD256,

  IX86_BUILTIN_CVTUDQ2PS,

  IX86_BUILTIN_VEC_PERM_V2DF,
  IX86_BUILTIN_VEC_PERM_V4SF,
  IX86_BUILTIN_VEC_PERM_V2DI,
  IX86_BUILTIN_VEC_PERM_V4SI,
  IX86_BUILTIN_VEC_PERM_V8HI,
  IX86_BUILTIN_VEC_PERM_V16QI,
  IX86_BUILTIN_VEC_PERM_V2DI_U,
  IX86_BUILTIN_VEC_PERM_V4SI_U,
  IX86_BUILTIN_VEC_PERM_V8HI_U,
  IX86_BUILTIN_VEC_PERM_V16QI_U,
  IX86_BUILTIN_VEC_PERM_V4DF,
  IX86_BUILTIN_VEC_PERM_V8SF,
  /* FMA4 and XOP instructions.  */
  IX86_BUILTIN_VFMADDSS,
  IX86_BUILTIN_VFMADDSD,
  IX86_BUILTIN_VFMADDPS,
  IX86_BUILTIN_VFMADDPD,
  IX86_BUILTIN_VFMADDPS256,
  IX86_BUILTIN_VFMADDPD256,
  IX86_BUILTIN_VFMADDSUBPS,
  IX86_BUILTIN_VFMADDSUBPD,
  IX86_BUILTIN_VFMADDSUBPS256,
  IX86_BUILTIN_VFMADDSUBPD256,

  IX86_BUILTIN_VPCMOV,
  IX86_BUILTIN_VPCMOV_V2DI,
  IX86_BUILTIN_VPCMOV_V4SI,
  IX86_BUILTIN_VPCMOV_V8HI,
  IX86_BUILTIN_VPCMOV_V16QI,
  IX86_BUILTIN_VPCMOV_V4SF,
  IX86_BUILTIN_VPCMOV_V2DF,
  IX86_BUILTIN_VPCMOV256,
  IX86_BUILTIN_VPCMOV_V4DI256,
  IX86_BUILTIN_VPCMOV_V8SI256,
  IX86_BUILTIN_VPCMOV_V16HI256,
  IX86_BUILTIN_VPCMOV_V32QI256,
  IX86_BUILTIN_VPCMOV_V8SF256,
  IX86_BUILTIN_VPCMOV_V4DF256,

  IX86_BUILTIN_VPPERM,

  IX86_BUILTIN_VPMACSSWW,
  IX86_BUILTIN_VPMACSWW,
  IX86_BUILTIN_VPMACSSWD,
  IX86_BUILTIN_VPMACSWD,
  IX86_BUILTIN_VPMACSSDD,
  IX86_BUILTIN_VPMACSDD,
  IX86_BUILTIN_VPMACSSDQL,
  IX86_BUILTIN_VPMACSSDQH,
  IX86_BUILTIN_VPMACSDQL,
  IX86_BUILTIN_VPMACSDQH,
  IX86_BUILTIN_VPMADCSSWD,
  IX86_BUILTIN_VPMADCSWD,

  IX86_BUILTIN_VPHADDBW,
  IX86_BUILTIN_VPHADDBD,
  IX86_BUILTIN_VPHADDBQ,
  IX86_BUILTIN_VPHADDWD,
  IX86_BUILTIN_VPHADDWQ,
  IX86_BUILTIN_VPHADDDQ,
  IX86_BUILTIN_VPHADDUBW,
  IX86_BUILTIN_VPHADDUBD,
  IX86_BUILTIN_VPHADDUBQ,
  IX86_BUILTIN_VPHADDUWD,
  IX86_BUILTIN_VPHADDUWQ,
  IX86_BUILTIN_VPHADDUDQ,
  IX86_BUILTIN_VPHSUBBW,
  IX86_BUILTIN_VPHSUBWD,
  IX86_BUILTIN_VPHSUBDQ,

  IX86_BUILTIN_VPROTB,
  IX86_BUILTIN_VPROTW,
  IX86_BUILTIN_VPROTD,
  IX86_BUILTIN_VPROTQ,
  IX86_BUILTIN_VPROTB_IMM,
  IX86_BUILTIN_VPROTW_IMM,
  IX86_BUILTIN_VPROTD_IMM,
  IX86_BUILTIN_VPROTQ_IMM,

  IX86_BUILTIN_VPSHLB,
  IX86_BUILTIN_VPSHLW,
  IX86_BUILTIN_VPSHLD,
  IX86_BUILTIN_VPSHLQ,
  IX86_BUILTIN_VPSHAB,
  IX86_BUILTIN_VPSHAW,
  IX86_BUILTIN_VPSHAD,
  IX86_BUILTIN_VPSHAQ,

  IX86_BUILTIN_VFRCZSS,
  IX86_BUILTIN_VFRCZSD,
  IX86_BUILTIN_VFRCZPS,
  IX86_BUILTIN_VFRCZPD,
  IX86_BUILTIN_VFRCZPS256,
  IX86_BUILTIN_VFRCZPD256,

  IX86_BUILTIN_VPCOMEQUB,
  IX86_BUILTIN_VPCOMNEUB,
  IX86_BUILTIN_VPCOMLTUB,
  IX86_BUILTIN_VPCOMLEUB,
  IX86_BUILTIN_VPCOMGTUB,
  IX86_BUILTIN_VPCOMGEUB,
  IX86_BUILTIN_VPCOMFALSEUB,
  IX86_BUILTIN_VPCOMTRUEUB,

  IX86_BUILTIN_VPCOMEQUW,
  IX86_BUILTIN_VPCOMNEUW,
  IX86_BUILTIN_VPCOMLTUW,
  IX86_BUILTIN_VPCOMLEUW,
  IX86_BUILTIN_VPCOMGTUW,
  IX86_BUILTIN_VPCOMGEUW,
  IX86_BUILTIN_VPCOMFALSEUW,
  IX86_BUILTIN_VPCOMTRUEUW,

  IX86_BUILTIN_VPCOMEQUD,
  IX86_BUILTIN_VPCOMNEUD,
  IX86_BUILTIN_VPCOMLTUD,
  IX86_BUILTIN_VPCOMLEUD,
  IX86_BUILTIN_VPCOMGTUD,
  IX86_BUILTIN_VPCOMGEUD,
  IX86_BUILTIN_VPCOMFALSEUD,
  IX86_BUILTIN_VPCOMTRUEUD,

  IX86_BUILTIN_VPCOMEQUQ,
  IX86_BUILTIN_VPCOMNEUQ,
  IX86_BUILTIN_VPCOMLTUQ,
  IX86_BUILTIN_VPCOMLEUQ,
  IX86_BUILTIN_VPCOMGTUQ,
  IX86_BUILTIN_VPCOMGEUQ,
  IX86_BUILTIN_VPCOMFALSEUQ,
  IX86_BUILTIN_VPCOMTRUEUQ,

  IX86_BUILTIN_VPCOMEQB,
  IX86_BUILTIN_VPCOMNEB,
  IX86_BUILTIN_VPCOMLTB,
  IX86_BUILTIN_VPCOMLEB,
  IX86_BUILTIN_VPCOMGTB,
  IX86_BUILTIN_VPCOMGEB,
  IX86_BUILTIN_VPCOMFALSEB,
  IX86_BUILTIN_VPCOMTRUEB,

  IX86_BUILTIN_VPCOMEQW,
  IX86_BUILTIN_VPCOMNEW,
  IX86_BUILTIN_VPCOMLTW,
  IX86_BUILTIN_VPCOMLEW,
  IX86_BUILTIN_VPCOMGTW,
  IX86_BUILTIN_VPCOMGEW,
  IX86_BUILTIN_VPCOMFALSEW,
  IX86_BUILTIN_VPCOMTRUEW,

  IX86_BUILTIN_VPCOMEQD,
  IX86_BUILTIN_VPCOMNED,
  IX86_BUILTIN_VPCOMLTD,
  IX86_BUILTIN_VPCOMLED,
  IX86_BUILTIN_VPCOMGTD,
  IX86_BUILTIN_VPCOMGED,
  IX86_BUILTIN_VPCOMFALSED,
  IX86_BUILTIN_VPCOMTRUED,

  IX86_BUILTIN_VPCOMEQQ,
  IX86_BUILTIN_VPCOMNEQ,
  IX86_BUILTIN_VPCOMLTQ,
  IX86_BUILTIN_VPCOMLEQ,
  IX86_BUILTIN_VPCOMGTQ,
  IX86_BUILTIN_VPCOMGEQ,
  IX86_BUILTIN_VPCOMFALSEQ,
  IX86_BUILTIN_VPCOMTRUEQ,
  /* LWP instructions.  */
  IX86_BUILTIN_LLWPCB,
  IX86_BUILTIN_SLWPCB,
  IX86_BUILTIN_LWPVAL32,
  IX86_BUILTIN_LWPVAL64,
  IX86_BUILTIN_LWPINS32,
  IX86_BUILTIN_LWPINS64,

  /* BMI instructions.  */
  IX86_BUILTIN_BEXTR32,
  IX86_BUILTIN_BEXTR64,

  /* TBM instructions.  */
  IX86_BUILTIN_BEXTRI32,
  IX86_BUILTIN_BEXTRI64,

  /* FSGSBASE instructions.  */
  IX86_BUILTIN_RDFSBASE32,
  IX86_BUILTIN_RDFSBASE64,
  IX86_BUILTIN_RDGSBASE32,
  IX86_BUILTIN_RDGSBASE64,
  IX86_BUILTIN_WRFSBASE32,
  IX86_BUILTIN_WRFSBASE64,
  IX86_BUILTIN_WRGSBASE32,
  IX86_BUILTIN_WRGSBASE64,

  /* RDRND instructions.  */
  IX86_BUILTIN_RDRAND16_STEP,
  IX86_BUILTIN_RDRAND32_STEP,
  IX86_BUILTIN_RDRAND64_STEP,

  /* F16C instructions.  */
  IX86_BUILTIN_CVTPH2PS,
  IX86_BUILTIN_CVTPH2PS256,
  IX86_BUILTIN_CVTPS2PH,
  IX86_BUILTIN_CVTPS2PH256,

  /* CFString built-in for darwin */
  IX86_BUILTIN_CFSTRING,

  IX86_BUILTIN_MAX
};
/* Table for the ix86 builtin decls.  */
static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
/* Table of all of the builtin functions that are possible with different ISA's
   but are waiting to be built until a function is declared to use that
   ISA.  */
struct builtin_isa {
  const char *name;			/* function name */
  enum ix86_builtin_func_type tcode;	/* type to use in the declaration */
  int isa;				/* isa_flags this builtin is defined for */
  bool const_p;				/* true if the declaration is constant */
  bool set_and_not_built_p;
};

static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
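/* Illustrative note (not in the original source): for a builtin whose
   registration is deferred, an ix86_builtins_isa[] slot might hold, e.g.,

     name                = "__builtin_ia32_addpd256"
     tcode               = V4DF_FTYPE_V4DF_V4DF
     isa                 = OPTION_MASK_ISA_AVX
     set_and_not_built_p = true

   until ix86_add_new_builtins is called with AVX in the ISA flags.  */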
/* Add an ix86 target builtin function with CODE, NAME and TYPE.  Save the
   MASK of which isa_flags to use in the ix86_builtins_isa array.  Stores the
   function decl in the ix86_builtins array.  Returns the function decl or
   NULL_TREE, if the builtin was not added.

   If the front end has a special hook for builtin functions, delay adding
   builtin functions that aren't in the current ISA until the ISA is changed
   with function specific optimization.  Doing so can save about 300K for the
   default compiler.  When the builtin is expanded, check at that time whether
   it is valid.

   If the front end doesn't have a special hook, record all builtins, even if
   they aren't in the current ISA, in case the user uses function specific
   options for a different ISA, so that we don't get scope errors if a
   builtin is added in the middle of a function scope.  */
static inline tree
def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
	     enum ix86_builtins code)
{
  tree decl = NULL_TREE;

  if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
    {
      ix86_builtins_isa[(int) code].isa = mask;

      mask &= ~OPTION_MASK_ISA_64BIT;
      if (mask == 0
	  || (mask & ix86_isa_flags) != 0
	  || (lang_hooks.builtin_function
	      == lang_hooks.builtin_function_ext_scope))
	{
	  tree type = ix86_get_builtin_func_type (tcode);
	  decl = add_builtin_function (name, type, code, BUILT_IN_MD,
				       NULL, NULL_TREE);
	  ix86_builtins[(int) code] = decl;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = false;
	}
      else
	{
	  ix86_builtins[(int) code] = NULL_TREE;
	  ix86_builtins_isa[(int) code].tcode = tcode;
	  ix86_builtins_isa[(int) code].name = name;
	  ix86_builtins_isa[(int) code].const_p = false;
	  ix86_builtins_isa[(int) code].set_and_not_built_p = true;
	}
    }

  return decl;
}
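/* Usage sketch (illustrative only): registering the SSE2 addpd builtin
   through this helper would look like

     def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_addpd",
		  V2DF_FTYPE_V2DF_V2DF, IX86_BUILTIN_ADDPD);

   On an -msse2 compilation the decl is built immediately; otherwise it is
   queued in ix86_builtins_isa[] for ix86_add_new_builtins to pick up.  */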
/* Like def_builtin, but also marks the function decl "const".  */

static inline tree
def_builtin_const (int mask, const char *name,
		   enum ix86_builtin_func_type tcode, enum ix86_builtins code)
{
  tree decl = def_builtin (mask, name, tcode, code);
  if (decl)
    TREE_READONLY (decl) = 1;
  else
    ix86_builtins_isa[(int) code].const_p = true;

  return decl;
}
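/* Usage sketch (illustrative only): side-effect-free builtins go through
   the "const" variant so their decls can participate in CSE, e.g.

     def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mulpd",
			V2DF_FTYPE_V2DF_V2DF, IX86_BUILTIN_MULPD);  */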
/* Add any new builtin functions for a given ISA that may not have been
   declared.  This saves a bit of space compared to adding all of the
   declarations to the tree, even if we didn't use them.  */

static void
ix86_add_new_builtins (int isa)
{
  int i;

  for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
    {
      if ((ix86_builtins_isa[i].isa & isa) != 0
	  && ix86_builtins_isa[i].set_and_not_built_p)
	{
	  tree decl, type;

	  /* Don't define the builtin again.  */
	  ix86_builtins_isa[i].set_and_not_built_p = false;

	  type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
	  decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
						 type, i, BUILT_IN_MD, NULL,
						 NULL_TREE);

	  ix86_builtins[i] = decl;
	  if (ix86_builtins_isa[i].const_p)
	    TREE_READONLY (decl) = 1;
	}
    }
}
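/* Illustrative call (an assumption, not from this file): when the ISA flags
   grow, e.g. via __attribute__((target("avx"))), the option-handling code
   is expected to invoke

     ix86_add_new_builtins (ix86_isa_flags);

   so that builtins deferred by def_builtin above become visible.  */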
/* Bits for builtin_description.flag.  */

/* Set when we don't support the comparison natively, and should
   swap_comparison in order to support it.  */
#define BUILTIN_DESC_SWAP_OPERANDS 1
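/* Illustrative example (hypothetical row, not from this file): a comparison
   the hardware lacks, say GE where only LE exists, could be described as

     ..., GE, BUILTIN_DESC_SWAP_OPERANDS },

   and the expander would then swap the two operands and emit the swapped
   comparison instead.  */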
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum ix86_builtins code;
  const enum rtx_code comparison;
  const int flag;
};
static const struct builtin_description bdesc_comi[] =
{
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
};
static const struct builtin_description bdesc_pcmpestr[] =
{
  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
};
static const struct builtin_description bdesc_pcmpistr[] =
{
  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
};
/* Special builtins with variable number of arguments.  */
static const struct builtin_description bdesc_special_args[] =
{
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },

  /* MMX */
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* 3DNow! */
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },

  /* SSE */
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },

  /* SSE or 3DNow!A  */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },

  /* SSE2 */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },

  /* SSE3 */
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },

  /* SSE4.1 */
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },

  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },

  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DI_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SI_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DI_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SI_V8SF },

  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
  { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },

  /* FSGSBASE */
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasesi, "__builtin_ia32_rdfsbase32", IX86_BUILTIN_RDFSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasedi, "__builtin_ia32_rdfsbase64", IX86_BUILTIN_RDFSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasesi, "__builtin_ia32_rdgsbase32", IX86_BUILTIN_RDGSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasedi, "__builtin_ia32_rdgsbase64", IX86_BUILTIN_RDGSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasesi, "__builtin_ia32_wrfsbase32", IX86_BUILTIN_WRFSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasedi, "__builtin_ia32_wrfsbase64", IX86_BUILTIN_WRFSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasesi, "__builtin_ia32_wrgsbase32", IX86_BUILTIN_WRGSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
  { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasedi, "__builtin_ia32_wrgsbase64", IX86_BUILTIN_WRGSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
};
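/* Decoding an entry (illustrative, not from the original source): in the
   rdtscp row near the top of this table, the flag field
   (int) UINT64_FTYPE_PUNSIGNED says the builtin is typed roughly as

     unsigned long long __builtin_ia32_rdtscp (unsigned int *);

   ix86_get_builtin_func_type turns these codes into function type trees.  */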
/* Builtins with variable number of arguments.  */
static const struct builtin_description bdesc_args[] =
{
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
  { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
  /* MMX */
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  /* 3DNow! */
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },

  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  /* 3DNow!A */
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  /* SSE */
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, V4SF_FTYPE_V4SF_DI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  /* SSE MMX or 3DNow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
24969 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
24971 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
24972 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
24973 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
24974 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
24975 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
24976 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
24977 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
24978 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
24979 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
24980 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
24981 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
24982 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
24984 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
24985 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
24986 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
24987 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
24988 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
24989 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
24991 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
24992 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
24993 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
24994 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
24995 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
24997 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
24999 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
25000 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
25001 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
25002 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
25004 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
25005 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
25006 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
25008 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25009 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25010 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25011 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25012 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25013 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25014 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25015 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
25017 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
25018 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
25019 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
25020 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
25021 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP},
25022 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
25023 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
25024 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
25025 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
25026 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
25027 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
25028 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
25029 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
25030 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
25031 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
25032 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
25033 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
25034 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
25035 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
25036 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
{ OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
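
/* The _COUNT suffixes distinguish the two hardware shift forms: the
   *_SI_COUNT entries take the count as a scalar (the psllwi-style
   immediates), while the vector _COUNT entries take it in the low
   quadword of a vector operand (the psllw-style register forms).  The
   INT_CONVERT variants mark an implicit mode change: pslldqi128 and
   psrldqi128 expand through the V1TImode shift patterns, and their
   count operand is in bits, which is why the emmintrin.h wrapper
   scales the byte count; roughly:

     #define _mm_slli_si128(A, N) \
       ((__m128i) __builtin_ia32_pslldqi128 ((__m128i)(A), (int)(N) * 8))
*/
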
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

/* SSE2 MMX */
{ OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },

/* SSE3 */
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

/* SSSE3 */
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
{ OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },

/* SSE4.1 */
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
{ OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_floorpd", IX86_BUILTIN_FLOORPD, (enum rtx_code) ROUND_FLOOR, (int) V2DF_FTYPE_V2DF_ROUND },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_ceilpd", IX86_BUILTIN_CEILPD, (enum rtx_code) ROUND_CEIL, (int) V2DF_FTYPE_V2DF_ROUND },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_truncpd", IX86_BUILTIN_TRUNCPD, (enum rtx_code) ROUND_TRUNC, (int) V2DF_FTYPE_V2DF_ROUND },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_rintpd", IX86_BUILTIN_RINTPD, (enum rtx_code) ROUND_MXCSR, (int) V2DF_FTYPE_V2DF_ROUND },

{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_floorps", IX86_BUILTIN_FLOORPS, (enum rtx_code) ROUND_FLOOR, (int) V4SF_FTYPE_V4SF_ROUND },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_ceilps", IX86_BUILTIN_CEILPS, (enum rtx_code) ROUND_CEIL, (int) V4SF_FTYPE_V4SF_ROUND },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_truncps", IX86_BUILTIN_TRUNCPS, (enum rtx_code) ROUND_TRUNC, (int) V4SF_FTYPE_V4SF_ROUND },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_rintps", IX86_BUILTIN_RINTPS, (enum rtx_code) ROUND_MXCSR, (int) V4SF_FTYPE_V4SF_ROUND },
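
/* For the floor/ceil/trunc/rint rows above, the comparison slot is
   reused to carry a ROUND_* rounding-control constant instead of an
   rtx code; the *_ROUND function types route expansion through a
   helper that passes that constant as the immediate operand, so e.g.
   __builtin_ia32_floorpd (x) behaves like
   __builtin_ia32_roundpd (x, ROUND_FLOOR).  */
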
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
{ OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
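
/* The rtx codes on the ptest rows select which flag of PTEST becomes
   the integer result: EQ tests ZF (ptestz), LTU tests CF (ptestc),
   and GTU tests ZF == 0 && CF == 0 (ptestnzc), mirroring the
   sete/setc/seta idioms emitted after the instruction.  */
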
/* SSE4.2 */
{ OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
{ OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
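
/* Usage sketch (not part of this table; variable names illustrative):
   the CRC32 builtins fold successive chunks into a running CRC-32C
   value, matching the UINT_FTYPE_UINT_* signatures above:

     unsigned int crc = 0xffffffffU;
     crc = __builtin_ia32_crc32qi (crc, byte);
     crc = __builtin_ia32_crc32si (crc, word32);
*/
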
/* SSE4A */
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
{ OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

/* AES */
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
{ OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

/* PCLMUL */
{ OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
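
/* The AES and PCLMUL rows carry a null name and only an SSE2 mask:
   the user-visible builtin names are registered elsewhere in this
   file under their own ISA masks, so they appear only when -maes or
   -mpclmul is in effect, while these rows serve just to drive
   expansion.  */
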
/* AVX */
{ OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

{ OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_floorpd256", IX86_BUILTIN_FLOORPD256, (enum rtx_code) ROUND_FLOOR, (int) V4DF_FTYPE_V4DF_ROUND },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_ceilpd256", IX86_BUILTIN_CEILPD256, (enum rtx_code) ROUND_CEIL, (int) V4DF_FTYPE_V4DF_ROUND },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_truncpd256", IX86_BUILTIN_TRUNCPD256, (enum rtx_code) ROUND_TRUNC, (int) V4DF_FTYPE_V4DF_ROUND },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_rintpd256", IX86_BUILTIN_RINTPD256, (enum rtx_code) ROUND_MXCSR, (int) V4DF_FTYPE_V4DF_ROUND },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_floorps256", IX86_BUILTIN_FLOORPS256, (enum rtx_code) ROUND_FLOOR, (int) V8SF_FTYPE_V8SF_ROUND },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_ceilps256", IX86_BUILTIN_CEILPS256, (enum rtx_code) ROUND_CEIL, (int) V8SF_FTYPE_V8SF_ROUND },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_truncps256", IX86_BUILTIN_TRUNCPS256, (enum rtx_code) ROUND_TRUNC, (int) V8SF_FTYPE_V8SF_ROUND },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_rintps256", IX86_BUILTIN_RINTPS256, (enum rtx_code) ROUND_MXCSR, (int) V8SF_FTYPE_V8SF_ROUND },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
{ OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },

{ OPTION_MASK_ISA_AVX, CODE_FOR_copysignv8sf3, "__builtin_ia32_copysignps256", IX86_BUILTIN_CPYSGNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
{ OPTION_MASK_ISA_AVX, CODE_FOR_copysignv4df3, "__builtin_ia32_copysignpd256", IX86_BUILTIN_CPYSGNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },

{ OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },

/* BMI */
{ OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_si, "__builtin_ia32_bextr_u32", IX86_BUILTIN_BEXTR32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
{ OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_di, "__builtin_ia32_bextr_u64", IX86_BUILTIN_BEXTR64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
{ OPTION_MASK_ISA_BMI, CODE_FOR_ctzhi2, "__builtin_ctzs", IX86_BUILTIN_CTZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },

/* TBM */
{ OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_si, "__builtin_ia32_bextri_u32", IX86_BUILTIN_BEXTRI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
{ OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_di, "__builtin_ia32_bextri_u64", IX86_BUILTIN_BEXTRI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },

/* F16C */
{ OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps, "__builtin_ia32_vcvtph2ps", IX86_BUILTIN_CVTPH2PS, UNKNOWN, (int) V4SF_FTYPE_V8HI },
{ OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps256, "__builtin_ia32_vcvtph2ps256", IX86_BUILTIN_CVTPH2PS256, UNKNOWN, (int) V8SF_FTYPE_V8HI },
{ OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph, "__builtin_ia32_vcvtps2ph", IX86_BUILTIN_CVTPS2PH, UNKNOWN, (int) V8HI_FTYPE_V4SF_INT },
{ OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph256, "__builtin_ia32_vcvtps2ph256", IX86_BUILTIN_CVTPS2PH256, UNKNOWN, (int) V8HI_FTYPE_V8SF_INT },
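
/* Usage sketch (illustrative, not part of this table): the ps2ph
   builtins take an explicit rounding immediate as their last operand,
   per the V8HI_FTYPE_V4SF_INT signature above, e.g.

     __builtin_ia32_vcvtps2ph ((__v4sf) x, 0x04)

   where 0x04 selects rounding under MXCSR control
   (_MM_FROUND_CUR_DIRECTION).  */
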
/* FMA4 and XOP. */
#define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
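
/* These MULTI_ARG_* names are plain aliases for the shared
   ix86_builtin_func_type enumerators, kept so the FMA4/XOP rows read
   in terms of operand shape rather than mode spelling.  After
   preprocessing, a row such as

     { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm",
       IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI }

   is identical to one written with V16QI_FTYPE_V16QI_V16QI_V16QI
   directly and is handled by the same expansion path.  */
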
static const struct builtin_description bdesc_multi_arg[] =
{
{ OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v4sf,
  "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS,
  UNKNOWN, (int)MULTI_ARG_3_SF },
{ OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v2df,
  "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD,
  UNKNOWN, (int)MULTI_ARG_3_DF },

{ OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4sf,
  "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS,
  UNKNOWN, (int)MULTI_ARG_3_SF },
{ OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v2df,
  "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD,
  UNKNOWN, (int)MULTI_ARG_3_DF },
{ OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v8sf,
  "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256,
  UNKNOWN, (int)MULTI_ARG_3_SF2 },
{ OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4df,
  "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256,
  UNKNOWN, (int)MULTI_ARG_3_DF2 },

{ OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4sf,
  "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS,
  UNKNOWN, (int)MULTI_ARG_3_SF },
{ OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v2df,
  "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD,
  UNKNOWN, (int)MULTI_ARG_3_DF },
{ OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v8sf,
  "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256,
  UNKNOWN, (int)MULTI_ARG_3_SF2 },
{ OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4df,
  "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256,
  UNKNOWN, (int)MULTI_ARG_3_DF2 },
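
/* A mask with both FMA and FMA4 set means the builtin is provided
   when either ISA is enabled (the builtin-definition machinery treats
   a multi-bit mask as "any of these bits"), so the packed vfmadd and
   vfmaddsub names above are shared between -mfma and -mfma4, while
   the scalar vfmaddss/vfmaddsd forms remain FMA4-only.  */
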
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },

{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
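
/* VPCMOV is a bitwise select: each result bit comes from the first
   source where the corresponding selector bit is set and from the
   second source where it is clear, i.e. roughly

     dst = (src1 & sel) | (src2 & ~sel);

   which is why the same operation is exposed at every element width,
   with only the builtin's type signature varying across the _v*
   variants.  */
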
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },

{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },

{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },

{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },

{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
25594 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
25596 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
25597 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
25598 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
25599 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
25600 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
25601 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
25602 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
25604 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
25605 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
25606 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
25607 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
25608 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
25609 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
25610 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
25612 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
25613 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
25614 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
25615 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
25616 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
25617 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
25618 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
25620 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
25621 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
25622 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
25623 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
25624 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
25625 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
25626 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
25628 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
25629 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
25630 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
25631 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
25632 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
25633 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
25634 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
25636 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
25637 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
25638 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
25639 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
25640 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
25641 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
25642 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
25644 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
25645 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
25646 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
25647 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
25648 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
25649 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
25650 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
25652 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
25653 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
25654 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
25655 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
25656 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
25657 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
25658 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
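/* The "neq" spellings above (__builtin_ia32_vpcomneqb/w/d/q and the
   unsigned "nequ" forms) deliberately reuse the IX86_BUILTIN_VPCOMNE*
   codes of the plain "ne" entries: they are alternate names for the
   same comparison, not distinct builtins.  */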
25660 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
25661 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
25662 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
25663 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
25664 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
25665 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
25666 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
25667 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
25669 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
25670 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
25671 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
25672 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
25673 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
25674 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
25675 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
25676 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
25678 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
25679 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
25680 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
25681 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
25685 /* Set up all the MMX/SSE builtins, even builtins for instructions that are not
25686 in the current target ISA to allow the user to compile particular modules
25687 with different target specific options that differ from the command line
25688 options.  */
25689 static void
25690 ix86_init_mmx_sse_builtins (void)
25692 const struct builtin_description * d;
25693 enum ix86_builtin_func_type ftype;
25694 size_t i;
25696 /* Add all special builtins with variable number of operands. */
25697 for (i = 0, d = bdesc_special_args;
25698 i < ARRAY_SIZE (bdesc_special_args);
25699 i++, d++)
25704 ftype = (enum ix86_builtin_func_type) d->flag;
25705 def_builtin (d->mask, d->name, ftype, d->code);
25708 /* Add all builtins with variable number of operands. */
25709 for (i = 0, d = bdesc_args;
25710 i < ARRAY_SIZE (bdesc_args);
25711 i++, d++)
25716 ftype = (enum ix86_builtin_func_type) d->flag;
25717 def_builtin_const (d->mask, d->name, ftype, d->code);
25720 /* pcmpestr[im] insns. */
25721 for (i = 0, d = bdesc_pcmpestr;
25722 i < ARRAY_SIZE (bdesc_pcmpestr);
25723 i++, d++)
25725 if (d->code == IX86_BUILTIN_PCMPESTRM128)
25726 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
25727 else
25728 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
25729 def_builtin_const (d->mask, d->name, ftype, d->code);
25732 /* pcmpistr[im] insns. */
25733 for (i = 0, d = bdesc_pcmpistr;
25734 i < ARRAY_SIZE (bdesc_pcmpistr);
25735 i++, d++)
25737 if (d->code == IX86_BUILTIN_PCMPISTRM128)
25738 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
25739 else
25740 ftype = INT_FTYPE_V16QI_V16QI_INT;
25741 def_builtin_const (d->mask, d->name, ftype, d->code);
25744 /* comi/ucomi insns. */
25745 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
25747 if (d->mask == OPTION_MASK_ISA_SSE2)
25748 ftype = INT_FTYPE_V2DF_V2DF;
25749 else
25750 ftype = INT_FTYPE_V4SF_V4SF;
25751 def_builtin_const (d->mask, d->name, ftype, d->code);
25755 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
25756 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
25757 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
25758 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
25760 /* SSE or 3DNow!A */
25761 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25762 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
25763 IX86_BUILTIN_MASKMOVQ);
25766 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
25767 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
25769 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
25770 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
25771 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
25772 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
25775 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
25776 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
25777 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
25778 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
25781 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
25782 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
25783 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
25784 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
25785 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
25786 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
25787 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
25788 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
25789 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
25790 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
25791 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
25792 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
25795 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
25796 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
25799 def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand16_step",
25800 INT_FTYPE_PUSHORT, IX86_BUILTIN_RDRAND16_STEP);
25801 def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand32_step",
25802 INT_FTYPE_PUNSIGNED, IX86_BUILTIN_RDRAND32_STEP);
25803 def_builtin (OPTION_MASK_ISA_RDRND | OPTION_MASK_ISA_64BIT,
25804 "__builtin_ia32_rdrand64_step", INT_FTYPE_PULONGLONG,
25805 IX86_BUILTIN_RDRAND64_STEP);
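/* Usage sketch (illustrative, not part of this file): each rdrand
   step builtin stores a random value through its pointer argument and
   returns the carry flag, so callers normally retry until the DRNG
   reports success:

       unsigned int r;
       while (__builtin_ia32_rdrand32_step (&r) == 0)
         ;
*/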
25807 /* MMX access to the vec_init patterns. */
25808 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
25809 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
25811 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
25812 V4HI_FTYPE_HI_HI_HI_HI,
25813 IX86_BUILTIN_VEC_INIT_V4HI);
25815 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
25816 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
25817 IX86_BUILTIN_VEC_INIT_V8QI);
25819 /* Access to the vec_extract patterns. */
25820 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
25821 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
25822 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
25823 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
25824 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
25825 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
25826 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
25827 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
25828 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
25829 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
25831 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25832 "__builtin_ia32_vec_ext_v4hi",
25833 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
25835 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
25836 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
25838 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
25839 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
25841 /* Access to the vec_set patterns. */
25842 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
25843 "__builtin_ia32_vec_set_v2di",
25844 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
25846 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
25847 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
25849 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
25850 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
25852 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
25853 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
25855 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25856 "__builtin_ia32_vec_set_v4hi",
25857 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
25859 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
25860 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
25862 /* Add FMA4 and XOP multi-arg instructions.  */
25863 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
25868 ftype = (enum ix86_builtin_func_type) d->flag;
25869 def_builtin_const (d->mask, d->name, ftype, d->code);
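/* Illustrative sketch (hypothetical user code, not from this file):
   because the tables above are registered even when their ISA bit is
   off on the command line, a module built with plain -msse2 can still
   use an XOP builtin behind the target attribute:

       typedef short v8hi __attribute__ ((vector_size (16)));

       __attribute__ ((target ("xop")))
       v8hi madd (v8hi a, v8hi b, v8hi c)
       {
         return __builtin_ia32_vpmacssww (a, b, c);
       }
*/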
25873 /* Internal method for ix86_init_builtins. */
25875 static void
25876 ix86_init_builtins_va_builtins_abi (void)
25878 tree ms_va_ref, sysv_va_ref;
25879 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
25880 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
25881 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
25882 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
25886 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
25887 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
25888 ms_va_ref = build_reference_type (ms_va_list_type_node);
25889 sysv_va_ref =
25890 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
25892 fnvoid_va_end_ms =
25893 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
25894 fnvoid_va_start_ms =
25895 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
25896 fnvoid_va_end_sysv =
25897 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
25898 fnvoid_va_start_sysv =
25899 build_varargs_function_type_list (void_type_node, sysv_va_ref,
25900 NULL_TREE);
25901 fnvoid_va_copy_ms =
25902 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
25903 NULL_TREE);
25904 fnvoid_va_copy_sysv =
25905 build_function_type_list (void_type_node, sysv_va_ref,
25906 sysv_va_ref, NULL_TREE);
25908 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
25909 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
25910 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
25911 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
25912 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
25913 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
25914 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
25915 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
25916 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
25917 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
25918 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
25919 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
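/* Usage sketch (illustrative, not from this file): inside an ms_abi
   function on a SysV target, varargs go through the __builtin_ms_va_*
   entries registered above instead of the native ones:

       int __attribute__ ((ms_abi))
       sum (int n, ...)
       {
         __builtin_ms_va_list ap;
         int i, s = 0;
         __builtin_ms_va_start (ap, n);
         for (i = 0; i < n; i++)
           s += __builtin_va_arg (ap, int);
         __builtin_ms_va_end (ap);
         return s;
       }
*/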
25922 static void
25923 ix86_init_builtin_types (void)
25925 tree float128_type_node, float80_type_node;
25927 /* The __float80 type. */
25928 float80_type_node = long_double_type_node;
25929 if (TYPE_MODE (float80_type_node) != XFmode)
25931 /* long double does not have XFmode here, so make a distinct 80-bit type.  */
25932 float80_type_node = make_node (REAL_TYPE);
25934 TYPE_PRECISION (float80_type_node) = 80;
25935 layout_type (float80_type_node);
25937 lang_hooks.types.register_builtin_type (float80_type_node, "__float80");
25939 /* The __float128 type. */
25940 float128_type_node = make_node (REAL_TYPE);
25941 TYPE_PRECISION (float128_type_node) = 128;
25942 layout_type (float128_type_node);
25943 lang_hooks.types.register_builtin_type (float128_type_node, "__float128");
25945 /* This macro is built by i386-builtin-types.awk. */
25946 DEFINE_BUILTIN_PRIMITIVE_TYPES;
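/* Usage sketch (illustrative): once these are registered, both
   extended types are directly nameable from C, with GCC's 'w' and 'q'
   constant suffixes:

       __float80  x = 0.1w;
       __float128 y = 0.1q;
*/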
25949 static void
25950 ix86_init_builtins (void)
25954 ix86_init_builtin_types ();
25956 /* TFmode support builtins. */
25957 def_builtin_const (0, "__builtin_infq",
25958 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
25959 def_builtin_const (0, "__builtin_huge_valq",
25960 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
25962 /* We will expand them to normal calls if SSE2 isn't available,
25963 since they are used by libgcc.  */
25964 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
25965 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
25966 BUILT_IN_MD, "__fabstf2", NULL_TREE);
25967 TREE_READONLY (t) = 1;
25968 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
25970 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
25971 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
25972 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
25973 TREE_READONLY (t) = 1;
25974 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
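/* Usage sketch (illustrative): these behave like ordinary math
   builtins on __float128 operands,

       __float128 a = __builtin_fabsq (-3.0q);
       __float128 b = __builtin_copysignq (1.0q, a);

   and, per the comment above, fall back to the libgcc routines
   __fabstf2 and __copysigntf3 when SSE2 is not available.  */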
25976 ix86_init_mmx_sse_builtins ();
25979 ix86_init_builtins_va_builtins_abi ();
25981 #ifdef SUBTARGET_INIT_BUILTINS
25982 SUBTARGET_INIT_BUILTINS;
25983 #endif
25986 /* Return the ix86 builtin for CODE. */
25988 static tree
25989 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
25991 if (code >= IX86_BUILTIN_MAX)
25992 return error_mark_node;
25994 return ix86_builtins[code];
25997 /* Errors in the source file can cause expand_expr to return const0_rtx
25998 where we expect a vector. To avoid crashing, use one of the vector
25999 clear instructions. */
26000 static rtx
26001 safe_vector_operand (rtx x, enum machine_mode mode)
26003 if (x == const0_rtx)
26004 x = CONST0_RTX (mode);
26008 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
26010 static rtx
26011 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
26014 tree arg0 = CALL_EXPR_ARG (exp, 0);
26015 tree arg1 = CALL_EXPR_ARG (exp, 1);
26016 rtx op0 = expand_normal (arg0);
26017 rtx op1 = expand_normal (arg1);
26018 enum machine_mode tmode = insn_data[icode].operand[0].mode;
26019 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
26020 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
26022 if (VECTOR_MODE_P (mode0))
26023 op0 = safe_vector_operand (op0, mode0);
26024 if (VECTOR_MODE_P (mode1))
26025 op1 = safe_vector_operand (op1, mode1);
26027 if (optimize || !target
26028 || GET_MODE (target) != tmode
26029 || !insn_data[icode].operand[0].predicate (target, tmode))
26030 target = gen_reg_rtx (tmode);
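/* The insn wants its second input in TImode (a whole XMM register)
   but the builtin was handed a 32-bit int: load the value into the
   low element of a V4SI register and view that register as TImode.  */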
26032 if (GET_MODE (op1) == SImode && mode1 == TImode)
26034 rtx x = gen_reg_rtx (V4SImode);
26035 emit_insn (gen_sse2_loadd (x, op1));
26036 op1 = gen_lowpart (TImode, x);
26039 if (!insn_data[icode].operand[1].predicate (op0, mode0))
26040 op0 = copy_to_mode_reg (mode0, op0);
26041 if (!insn_data[icode].operand[2].predicate (op1, mode1))
26042 op1 = copy_to_mode_reg (mode1, op1);
26044 pat = GEN_FCN (icode) (target, op0, op1);
26053 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
26055 static rtx
26056 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
26057 enum ix86_builtin_func_type m_type,
26058 enum rtx_code sub_code)
26063 bool comparison_p = false;
26064 bool tf_p = false;
26065 bool last_arg_constant = false;
26066 int num_memory = 0;
26067 struct {
26068 rtx op;
26069 enum machine_mode mode;
26070 } args[4];
26072 enum machine_mode tmode = insn_data[icode].operand[0].mode;
26076 case MULTI_ARG_4_DF2_DI_I:
26077 case MULTI_ARG_4_DF2_DI_I1:
26078 case MULTI_ARG_4_SF2_SI_I:
26079 case MULTI_ARG_4_SF2_SI_I1:
26080 nargs = 4;
26081 last_arg_constant = true;
26082 break;
26084 case MULTI_ARG_3_SF:
26085 case MULTI_ARG_3_DF:
26086 case MULTI_ARG_3_SF2:
26087 case MULTI_ARG_3_DF2:
26088 case MULTI_ARG_3_DI:
26089 case MULTI_ARG_3_SI:
26090 case MULTI_ARG_3_SI_DI:
26091 case MULTI_ARG_3_HI:
26092 case MULTI_ARG_3_HI_SI:
26093 case MULTI_ARG_3_QI:
26094 case MULTI_ARG_3_DI2:
26095 case MULTI_ARG_3_SI2:
26096 case MULTI_ARG_3_HI2:
26097 case MULTI_ARG_3_QI2:
26098 nargs = 3;
26099 break;
26101 case MULTI_ARG_2_SF:
26102 case MULTI_ARG_2_DF:
26103 case MULTI_ARG_2_DI:
26104 case MULTI_ARG_2_SI:
26105 case MULTI_ARG_2_HI:
26106 case MULTI_ARG_2_QI:
26107 nargs = 2;
26108 break;
26110 case MULTI_ARG_2_DI_IMM:
26111 case MULTI_ARG_2_SI_IMM:
26112 case MULTI_ARG_2_HI_IMM:
26113 case MULTI_ARG_2_QI_IMM:
26114 nargs = 2;
26115 last_arg_constant = true;
26116 break;
26118 case MULTI_ARG_1_SF:
26119 case MULTI_ARG_1_DF:
26120 case MULTI_ARG_1_SF2:
26121 case MULTI_ARG_1_DF2:
26122 case MULTI_ARG_1_DI:
26123 case MULTI_ARG_1_SI:
26124 case MULTI_ARG_1_HI:
26125 case MULTI_ARG_1_QI:
26126 case MULTI_ARG_1_SI_DI:
26127 case MULTI_ARG_1_HI_DI:
26128 case MULTI_ARG_1_HI_SI:
26129 case MULTI_ARG_1_QI_DI:
26130 case MULTI_ARG_1_QI_SI:
26131 case MULTI_ARG_1_QI_HI:
26132 nargs = 1;
26133 break;
26135 case MULTI_ARG_2_DI_CMP:
26136 case MULTI_ARG_2_SI_CMP:
26137 case MULTI_ARG_2_HI_CMP:
26138 case MULTI_ARG_2_QI_CMP:
26139 nargs = 2;
26140 comparison_p = true;
26141 break;
26143 case MULTI_ARG_2_SF_TF:
26144 case MULTI_ARG_2_DF_TF:
26145 case MULTI_ARG_2_DI_TF:
26146 case MULTI_ARG_2_SI_TF:
26147 case MULTI_ARG_2_HI_TF:
26148 case MULTI_ARG_2_QI_TF:
26149 nargs = 2;
26150 tf_p = true;
26151 break;
26153 default:
26154 gcc_unreachable ();
26157 if (optimize || !target
26158 || GET_MODE (target) != tmode
26159 || !insn_data[icode].operand[0].predicate (target, tmode))
26160 target = gen_reg_rtx (tmode);
26162 gcc_assert (nargs <= 4);
26164 for (i = 0; i < nargs; i++)
26166 tree arg = CALL_EXPR_ARG (exp, i);
26167 rtx op = expand_normal (arg);
26168 int adjust = (comparison_p) ? 1 : 0;
26169 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
26171 if (last_arg_constant && i == nargs-1)
26173 if (!CONST_INT_P (op))
26175 error ("last argument must be an immediate");
26176 return gen_reg_rtx (tmode);
26181 if (VECTOR_MODE_P (mode))
26182 op = safe_vector_operand (op, mode);
26184 /* If we aren't optimizing, only allow one memory operand to be
26185 generated.  */
26186 if (memory_operand (op, mode))
26189 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
26191 if (optimize
26192 || !insn_data[icode].operand[i+adjust+1].predicate (op, mode)
26193 || num_memory > 1)
26194 op = force_reg (mode, op);
26198 args[i].mode = mode;
26204 pat = GEN_FCN (icode) (target, args[0].op);
26208 if (tf_p)
26209 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
26210 GEN_INT ((int)sub_code));
26211 else if (! comparison_p)
26212 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
26215 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
26216 args[0].op,
26217 args[1].op);
26219 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
26224 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
26228 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
26229 break;
26231 default:
26232 gcc_unreachable ();
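/* Worked example (illustrative): __builtin_ia32_vpmacssww is listed
   above with MULTI_ARG_3_HI, so its three operands flow straight into
   the three-argument GEN_FCN call, while a vpcom* builtin uses a
   MULTI_ARG_2_*_CMP type and its sub_code (EQ, LT, ...) is wrapped by
   gen_rtx_fmt_ee into the comparison operand of the insn.  */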
26242 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
26243 insns with vec_merge. */
26245 static rtx
26246 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
26250 tree arg0 = CALL_EXPR_ARG (exp, 0);
26251 rtx op1, op0 = expand_normal (arg0);
26252 enum machine_mode tmode = insn_data[icode].operand[0].mode;
26253 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
26255 if (optimize || !target
26256 || GET_MODE (target) != tmode
26257 || !insn_data[icode].operand[0].predicate (target, tmode))
26258 target = gen_reg_rtx (tmode);
26260 if (VECTOR_MODE_P (mode0))
26261 op0 = safe_vector_operand (op0, mode0);
26263 if ((optimize && !register_operand (op0, mode0))
26264 || !insn_data[icode].operand[1].predicate (op0, mode0))
26265 op0 = copy_to_mode_reg (mode0, op0);
26268 if (!insn_data[icode].operand[2].predicate (op1, mode0))
26269 op1 = copy_to_mode_reg (mode0, op1);
26271 pat = GEN_FCN (icode) (target, op0, op1);
26278 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
26280 static rtx
26281 ix86_expand_sse_compare (const struct builtin_description *d,
26282 tree exp, rtx target, bool swap)
26285 tree arg0 = CALL_EXPR_ARG (exp, 0);
26286 tree arg1 = CALL_EXPR_ARG (exp, 1);
26287 rtx op0 = expand_normal (arg0);
26288 rtx op1 = expand_normal (arg1);
26290 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
26291 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
26292 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
26293 enum rtx_code comparison = d->comparison;
26295 if (VECTOR_MODE_P (mode0))
26296 op0 = safe_vector_operand (op0, mode0);
26297 if (VECTOR_MODE_P (mode1))
26298 op1 = safe_vector_operand (op1, mode1);
26300 /* Swap operands if we have a comparison that isn't available in
26301 the needed form.  */
26302 if (swap)
26303 {
26304 rtx tmp = gen_reg_rtx (mode1);
26305 emit_move_insn (tmp, op1);
26306 op1 = op0;
26307 op0 = tmp;
26308 }
26310 if (optimize || !target
26311 || GET_MODE (target) != tmode
26312 || !insn_data[d->icode].operand[0].predicate (target, tmode))
26313 target = gen_reg_rtx (tmode);
26315 if ((optimize && !register_operand (op0, mode0))
26316 || !insn_data[d->icode].operand[1].predicate (op0, mode0))
26317 op0 = copy_to_mode_reg (mode0, op0);
26318 if ((optimize && !register_operand (op1, mode1))
26319 || !insn_data[d->icode].operand[2].predicate (op1, mode1))
26320 op1 = copy_to_mode_reg (mode1, op1);
26322 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
26323 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
26330 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
26332 static rtx
26333 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
26337 tree arg0 = CALL_EXPR_ARG (exp, 0);
26338 tree arg1 = CALL_EXPR_ARG (exp, 1);
26339 rtx op0 = expand_normal (arg0);
26340 rtx op1 = expand_normal (arg1);
26341 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
26342 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
26343 enum rtx_code comparison = d->comparison;
26345 if (VECTOR_MODE_P (mode0))
26346 op0 = safe_vector_operand (op0, mode0);
26347 if (VECTOR_MODE_P (mode1))
26348 op1 = safe_vector_operand (op1, mode1);
26350 /* Swap operands if we have a comparison that isn't available in
26351 the needed form.  */
26352 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
26353 {
26354 rtx tmp = op1;
26355 op1 = op0;
26356 op0 = tmp;
26357 }
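/* Materialize the flags result as 0/1: zero a full SImode register,
   then write only its low QImode part from the flags comparison, so
   the upper bits are known to be zero.  The same idiom is used for
   ptest and pcmp[ei]str below.  */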
26359 target = gen_reg_rtx (SImode);
26360 emit_move_insn (target, const0_rtx);
26361 target = gen_rtx_SUBREG (QImode, target, 0);
26363 if ((optimize && !register_operand (op0, mode0))
26364 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26365 op0 = copy_to_mode_reg (mode0, op0);
26366 if ((optimize && !register_operand (op1, mode1))
26367 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
26368 op1 = copy_to_mode_reg (mode1, op1);
26370 pat = GEN_FCN (d->icode) (op0, op1);
26374 emit_insn (gen_rtx_SET (VOIDmode,
26375 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26376 gen_rtx_fmt_ee (comparison, QImode,
26377 SET_DEST (pat),
26378 const0_rtx)));
26380 return SUBREG_REG (target);
26383 /* Subroutine of ix86_expand_args_builtin to take care of round insns. */
26385 static rtx
26386 ix86_expand_sse_round (const struct builtin_description *d, tree exp,
26390 tree arg0 = CALL_EXPR_ARG (exp, 0);
26391 rtx op1, op0 = expand_normal (arg0);
26392 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
26393 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
26395 if (optimize || target == 0
26396 || GET_MODE (target) != tmode
26397 || !insn_data[d->icode].operand[0].predicate (target, tmode))
26398 target = gen_reg_rtx (tmode);
26400 if (VECTOR_MODE_P (mode0))
26401 op0 = safe_vector_operand (op0, mode0);
26403 if ((optimize && !register_operand (op0, mode0))
26404 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26405 op0 = copy_to_mode_reg (mode0, op0);
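/* For the *_ROUND builtins, d->comparison does not hold an rtx
   comparison code; it is reused to carry the rounding-mode immediate,
   which becomes the insn's last operand here.  */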
26407 op1 = GEN_INT (d->comparison);
26409 pat = GEN_FCN (d->icode) (target, op0, op1);
26416 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
26418 static rtx
26419 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
26423 tree arg0 = CALL_EXPR_ARG (exp, 0);
26424 tree arg1 = CALL_EXPR_ARG (exp, 1);
26425 rtx op0 = expand_normal (arg0);
26426 rtx op1 = expand_normal (arg1);
26427 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
26428 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
26429 enum rtx_code comparison = d->comparison;
26431 if (VECTOR_MODE_P (mode0))
26432 op0 = safe_vector_operand (op0, mode0);
26433 if (VECTOR_MODE_P (mode1))
26434 op1 = safe_vector_operand (op1, mode1);
26436 target = gen_reg_rtx (SImode);
26437 emit_move_insn (target, const0_rtx);
26438 target = gen_rtx_SUBREG (QImode, target, 0);
26440 if ((optimize && !register_operand (op0, mode0))
26441 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26442 op0 = copy_to_mode_reg (mode0, op0);
26443 if ((optimize && !register_operand (op1, mode1))
26444 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
26445 op1 = copy_to_mode_reg (mode1, op1);
26447 pat = GEN_FCN (d->icode) (op0, op1);
26451 emit_insn (gen_rtx_SET (VOIDmode,
26452 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26453 gen_rtx_fmt_ee (comparison, QImode,
26454 SET_DEST (pat),
26455 const0_rtx)));
26457 return SUBREG_REG (target);
26460 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
26462 static rtx
26463 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
26464 tree exp, rtx target)
26467 tree arg0 = CALL_EXPR_ARG (exp, 0);
26468 tree arg1 = CALL_EXPR_ARG (exp, 1);
26469 tree arg2 = CALL_EXPR_ARG (exp, 2);
26470 tree arg3 = CALL_EXPR_ARG (exp, 3);
26471 tree arg4 = CALL_EXPR_ARG (exp, 4);
26472 rtx scratch0, scratch1;
26473 rtx op0 = expand_normal (arg0);
26474 rtx op1 = expand_normal (arg1);
26475 rtx op2 = expand_normal (arg2);
26476 rtx op3 = expand_normal (arg3);
26477 rtx op4 = expand_normal (arg4);
26478 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
26480 tmode0 = insn_data[d->icode].operand[0].mode;
26481 tmode1 = insn_data[d->icode].operand[1].mode;
26482 modev2 = insn_data[d->icode].operand[2].mode;
26483 modei3 = insn_data[d->icode].operand[3].mode;
26484 modev4 = insn_data[d->icode].operand[4].mode;
26485 modei5 = insn_data[d->icode].operand[5].mode;
26486 modeimm = insn_data[d->icode].operand[6].mode;
26488 if (VECTOR_MODE_P (modev2))
26489 op0 = safe_vector_operand (op0, modev2);
26490 if (VECTOR_MODE_P (modev4))
26491 op2 = safe_vector_operand (op2, modev4);
26493 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
26494 op0 = copy_to_mode_reg (modev2, op0);
26495 if (!insn_data[d->icode].operand[3].predicate (op1, modei3))
26496 op1 = copy_to_mode_reg (modei3, op1);
26497 if ((optimize && !register_operand (op2, modev4))
26498 || !insn_data[d->icode].operand[4].predicate (op2, modev4))
26499 op2 = copy_to_mode_reg (modev4, op2);
26500 if (!insn_data[d->icode].operand[5].predicate (op3, modei5))
26501 op3 = copy_to_mode_reg (modei5, op3);
26503 if (!insn_data[d->icode].operand[6].predicate (op4, modeimm))
26505 error ("the fifth argument must be a 8-bit immediate");
26509 if (d->code == IX86_BUILTIN_PCMPESTRI128)
26511 if (optimize || !target
26512 || GET_MODE (target) != tmode0
26513 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
26514 target = gen_reg_rtx (tmode0);
26516 scratch1 = gen_reg_rtx (tmode1);
26518 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
26520 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
26522 if (optimize || !target
26523 || GET_MODE (target) != tmode1
26524 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
26525 target = gen_reg_rtx (tmode1);
26527 scratch0 = gen_reg_rtx (tmode0);
26529 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
26533 gcc_assert (d->flag);
26535 scratch0 = gen_reg_rtx (tmode0);
26536 scratch1 = gen_reg_rtx (tmode1);
26538 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
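/* This is one of the flag-returning forms (e.g.
   __builtin_ia32_pcmpestria128 and friends): d->flag is nonzero and
   names the machine mode of the flags comparison, so both string
   results go to scratch registers and the requested flag is
   materialized as a 0/1 value below.  */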
26548 target = gen_reg_rtx (SImode);
26549 emit_move_insn (target, const0_rtx);
26550 target = gen_rtx_SUBREG (QImode, target, 0);
26552 emit_insn
26553 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26554 gen_rtx_fmt_ee (EQ, QImode,
26555 gen_rtx_REG ((enum machine_mode) d->flag,
26556 FLAGS_REG),
26557 const0_rtx)));
26558 return SUBREG_REG (target);
26565 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
26567 static rtx
26568 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
26569 tree exp, rtx target)
26572 tree arg0 = CALL_EXPR_ARG (exp, 0);
26573 tree arg1 = CALL_EXPR_ARG (exp, 1);
26574 tree arg2 = CALL_EXPR_ARG (exp, 2);
26575 rtx scratch0, scratch1;
26576 rtx op0 = expand_normal (arg0);
26577 rtx op1 = expand_normal (arg1);
26578 rtx op2 = expand_normal (arg2);
26579 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
26581 tmode0 = insn_data[d->icode].operand[0].mode;
26582 tmode1 = insn_data[d->icode].operand[1].mode;
26583 modev2 = insn_data[d->icode].operand[2].mode;
26584 modev3 = insn_data[d->icode].operand[3].mode;
26585 modeimm = insn_data[d->icode].operand[4].mode;
26587 if (VECTOR_MODE_P (modev2))
26588 op0 = safe_vector_operand (op0, modev2);
26589 if (VECTOR_MODE_P (modev3))
26590 op1 = safe_vector_operand (op1, modev3);
26592 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
26593 op0 = copy_to_mode_reg (modev2, op0);
26594 if ((optimize && !register_operand (op1, modev3))
26595 || !insn_data[d->icode].operand[3].predicate (op1, modev3))
26596 op1 = copy_to_mode_reg (modev3, op1);
26598 if (!insn_data[d->icode].operand[4].predicate (op2, modeimm))
26600 error ("the third argument must be a 8-bit immediate");
26604 if (d->code == IX86_BUILTIN_PCMPISTRI128)
26606 if (optimize || !target
26607 || GET_MODE (target) != tmode0
26608 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
26609 target = gen_reg_rtx (tmode0);
26611 scratch1 = gen_reg_rtx (tmode1);
26613 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
26615 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
26617 if (optimize || !target
26618 || GET_MODE (target) != tmode1
26619 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
26620 target = gen_reg_rtx (tmode1);
26622 scratch0 = gen_reg_rtx (tmode0);
26624 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
26628 gcc_assert (d->flag);
26630 scratch0 = gen_reg_rtx (tmode0);
26631 scratch1 = gen_reg_rtx (tmode1);
26633 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
26643 target = gen_reg_rtx (SImode);
26644 emit_move_insn (target, const0_rtx);
26645 target = gen_rtx_SUBREG (QImode, target, 0);
26647 emit_insn
26648 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26649 gen_rtx_fmt_ee (EQ, QImode,
26650 gen_rtx_REG ((enum machine_mode) d->flag,
26651 FLAGS_REG),
26652 const0_rtx)));
26653 return SUBREG_REG (target);
26659 /* Subroutine of ix86_expand_builtin to take care of insns with
26660 variable number of operands. */
26662 static rtx
26663 ix86_expand_args_builtin (const struct builtin_description *d,
26664 tree exp, rtx target)
26666 rtx pat, real_target;
26667 unsigned int i, nargs;
26668 unsigned int nargs_constant = 0;
26669 int num_memory = 0;
26670 struct
26671 {
26672 rtx op;
26673 enum machine_mode mode;
26674 } args[4];
26675 bool last_arg_count = false;
26676 enum insn_code icode = d->icode;
26677 const struct insn_data_d *insn_p = &insn_data[icode];
26678 enum machine_mode tmode = insn_p->operand[0].mode;
26679 enum machine_mode rmode = VOIDmode;
26681 enum rtx_code comparison = d->comparison;
26682 bool swap = false;
26683 switch ((enum ix86_builtin_func_type) d->flag)
26685 case V2DF_FTYPE_V2DF_ROUND:
26686 case V4DF_FTYPE_V4DF_ROUND:
26687 case V4SF_FTYPE_V4SF_ROUND:
26688 case V8SF_FTYPE_V8SF_ROUND:
26689 return ix86_expand_sse_round (d, exp, target);
26690 case INT_FTYPE_V8SF_V8SF_PTEST:
26691 case INT_FTYPE_V4DI_V4DI_PTEST:
26692 case INT_FTYPE_V4DF_V4DF_PTEST:
26693 case INT_FTYPE_V4SF_V4SF_PTEST:
26694 case INT_FTYPE_V2DI_V2DI_PTEST:
26695 case INT_FTYPE_V2DF_V2DF_PTEST:
26696 return ix86_expand_sse_ptest (d, exp, target);
26697 case FLOAT128_FTYPE_FLOAT128:
26698 case FLOAT_FTYPE_FLOAT:
26699 case INT_FTYPE_INT:
26700 case UINT64_FTYPE_INT:
26701 case UINT16_FTYPE_UINT16:
26702 case INT64_FTYPE_INT64:
26703 case INT64_FTYPE_V4SF:
26704 case INT64_FTYPE_V2DF:
26705 case INT_FTYPE_V16QI:
26706 case INT_FTYPE_V8QI:
26707 case INT_FTYPE_V8SF:
26708 case INT_FTYPE_V4DF:
26709 case INT_FTYPE_V4SF:
26710 case INT_FTYPE_V2DF:
26711 case V16QI_FTYPE_V16QI:
26712 case V8SI_FTYPE_V8SF:
26713 case V8SI_FTYPE_V4SI:
26714 case V8HI_FTYPE_V8HI:
26715 case V8HI_FTYPE_V16QI:
26716 case V8QI_FTYPE_V8QI:
26717 case V8SF_FTYPE_V8SF:
26718 case V8SF_FTYPE_V8SI:
26719 case V8SF_FTYPE_V4SF:
26720 case V8SF_FTYPE_V8HI:
26721 case V4SI_FTYPE_V4SI:
26722 case V4SI_FTYPE_V16QI:
26723 case V4SI_FTYPE_V4SF:
26724 case V4SI_FTYPE_V8SI:
26725 case V4SI_FTYPE_V8HI:
26726 case V4SI_FTYPE_V4DF:
26727 case V4SI_FTYPE_V2DF:
26728 case V4HI_FTYPE_V4HI:
26729 case V4DF_FTYPE_V4DF:
26730 case V4DF_FTYPE_V4SI:
26731 case V4DF_FTYPE_V4SF:
26732 case V4DF_FTYPE_V2DF:
26733 case V4SF_FTYPE_V4SF:
26734 case V4SF_FTYPE_V4SI:
26735 case V4SF_FTYPE_V8SF:
26736 case V4SF_FTYPE_V4DF:
26737 case V4SF_FTYPE_V8HI:
26738 case V4SF_FTYPE_V2DF:
26739 case V2DI_FTYPE_V2DI:
26740 case V2DI_FTYPE_V16QI:
26741 case V2DI_FTYPE_V8HI:
26742 case V2DI_FTYPE_V4SI:
26743 case V2DF_FTYPE_V2DF:
26744 case V2DF_FTYPE_V4SI:
26745 case V2DF_FTYPE_V4DF:
26746 case V2DF_FTYPE_V4SF:
26747 case V2DF_FTYPE_V2SI:
26748 case V2SI_FTYPE_V2SI:
26749 case V2SI_FTYPE_V4SF:
26750 case V2SI_FTYPE_V2SF:
26751 case V2SI_FTYPE_V2DF:
26752 case V2SF_FTYPE_V2SF:
26753 case V2SF_FTYPE_V2SI:
26754 nargs = 1;
26755 break;
26756 case V4SF_FTYPE_V4SF_VEC_MERGE:
26757 case V2DF_FTYPE_V2DF_VEC_MERGE:
26758 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
26759 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
26760 case V16QI_FTYPE_V16QI_V16QI:
26761 case V16QI_FTYPE_V8HI_V8HI:
26762 case V8QI_FTYPE_V8QI_V8QI:
26763 case V8QI_FTYPE_V4HI_V4HI:
26764 case V8HI_FTYPE_V8HI_V8HI:
26765 case V8HI_FTYPE_V16QI_V16QI:
26766 case V8HI_FTYPE_V4SI_V4SI:
26767 case V8SF_FTYPE_V8SF_V8SF:
26768 case V8SF_FTYPE_V8SF_V8SI:
26769 case V4SI_FTYPE_V4SI_V4SI:
26770 case V4SI_FTYPE_V8HI_V8HI:
26771 case V4SI_FTYPE_V4SF_V4SF:
26772 case V4SI_FTYPE_V2DF_V2DF:
26773 case V4HI_FTYPE_V4HI_V4HI:
26774 case V4HI_FTYPE_V8QI_V8QI:
26775 case V4HI_FTYPE_V2SI_V2SI:
26776 case V4DF_FTYPE_V4DF_V4DF:
26777 case V4DF_FTYPE_V4DF_V4DI:
26778 case V4SF_FTYPE_V4SF_V4SF:
26779 case V4SF_FTYPE_V4SF_V4SI:
26780 case V4SF_FTYPE_V4SF_V2SI:
26781 case V4SF_FTYPE_V4SF_V2DF:
26782 case V4SF_FTYPE_V4SF_DI:
26783 case V4SF_FTYPE_V4SF_SI:
26784 case V2DI_FTYPE_V2DI_V2DI:
26785 case V2DI_FTYPE_V16QI_V16QI:
26786 case V2DI_FTYPE_V4SI_V4SI:
26787 case V2DI_FTYPE_V2DI_V16QI:
26788 case V2DI_FTYPE_V2DF_V2DF:
26789 case V2SI_FTYPE_V2SI_V2SI:
26790 case V2SI_FTYPE_V4HI_V4HI:
26791 case V2SI_FTYPE_V2SF_V2SF:
26792 case V2DF_FTYPE_V2DF_V2DF:
26793 case V2DF_FTYPE_V2DF_V4SF:
26794 case V2DF_FTYPE_V2DF_V2DI:
26795 case V2DF_FTYPE_V2DF_DI:
26796 case V2DF_FTYPE_V2DF_SI:
26797 case V2SF_FTYPE_V2SF_V2SF:
26798 case V1DI_FTYPE_V1DI_V1DI:
26799 case V1DI_FTYPE_V8QI_V8QI:
26800 case V1DI_FTYPE_V2SI_V2SI:
26801 if (comparison == UNKNOWN)
26802 return ix86_expand_binop_builtin (icode, exp, target);
26803 nargs = 2;
26804 break;
26805 case V4SF_FTYPE_V4SF_V4SF_SWAP:
26806 case V2DF_FTYPE_V2DF_V2DF_SWAP:
26807 gcc_assert (comparison != UNKNOWN);
26808 nargs = 2;
26809 swap = true;
26810 break;
26811 case V8HI_FTYPE_V8HI_V8HI_COUNT:
26812 case V8HI_FTYPE_V8HI_SI_COUNT:
26813 case V4SI_FTYPE_V4SI_V4SI_COUNT:
26814 case V4SI_FTYPE_V4SI_SI_COUNT:
26815 case V4HI_FTYPE_V4HI_V4HI_COUNT:
26816 case V4HI_FTYPE_V4HI_SI_COUNT:
26817 case V2DI_FTYPE_V2DI_V2DI_COUNT:
26818 case V2DI_FTYPE_V2DI_SI_COUNT:
26819 case V2SI_FTYPE_V2SI_V2SI_COUNT:
26820 case V2SI_FTYPE_V2SI_SI_COUNT:
26821 case V1DI_FTYPE_V1DI_V1DI_COUNT:
26822 case V1DI_FTYPE_V1DI_SI_COUNT:
26823 nargs = 2;
26824 last_arg_count = true;
26825 break;
26826 case UINT64_FTYPE_UINT64_UINT64:
26827 case UINT_FTYPE_UINT_UINT:
26828 case UINT_FTYPE_UINT_USHORT:
26829 case UINT_FTYPE_UINT_UCHAR:
26830 case UINT16_FTYPE_UINT16_INT:
26831 case UINT8_FTYPE_UINT8_INT:
26832 nargs = 2;
26833 break;
26834 case V2DI_FTYPE_V2DI_INT_CONVERT:
26835 nargs = 2;
26836 rmode = V1TImode;
26837 nargs_constant = 1;
26838 break;
26839 case V8HI_FTYPE_V8HI_INT:
26840 case V8HI_FTYPE_V8SF_INT:
26841 case V8HI_FTYPE_V4SF_INT:
26842 case V8SF_FTYPE_V8SF_INT:
26843 case V4SI_FTYPE_V4SI_INT:
26844 case V4SI_FTYPE_V8SI_INT:
26845 case V4HI_FTYPE_V4HI_INT:
26846 case V4DF_FTYPE_V4DF_INT:
26847 case V4SF_FTYPE_V4SF_INT:
26848 case V4SF_FTYPE_V8SF_INT:
26849 case V2DI_FTYPE_V2DI_INT:
26850 case V2DF_FTYPE_V2DF_INT:
26851 case V2DF_FTYPE_V4DF_INT:
26852 nargs = 2;
26853 nargs_constant = 1;
26854 break;
26855 case V16QI_FTYPE_V16QI_V16QI_V16QI:
26856 case V8SF_FTYPE_V8SF_V8SF_V8SF:
26857 case V4DF_FTYPE_V4DF_V4DF_V4DF:
26858 case V4SF_FTYPE_V4SF_V4SF_V4SF:
26859 case V2DF_FTYPE_V2DF_V2DF_V2DF:
26860 nargs = 3;
26861 break;
26862 case V16QI_FTYPE_V16QI_V16QI_INT:
26863 case V8HI_FTYPE_V8HI_V8HI_INT:
26864 case V8SI_FTYPE_V8SI_V8SI_INT:
26865 case V8SI_FTYPE_V8SI_V4SI_INT:
26866 case V8SF_FTYPE_V8SF_V8SF_INT:
26867 case V8SF_FTYPE_V8SF_V4SF_INT:
26868 case V4SI_FTYPE_V4SI_V4SI_INT:
26869 case V4DF_FTYPE_V4DF_V4DF_INT:
26870 case V4DF_FTYPE_V4DF_V2DF_INT:
26871 case V4SF_FTYPE_V4SF_V4SF_INT:
26872 case V2DI_FTYPE_V2DI_V2DI_INT:
26873 case V2DF_FTYPE_V2DF_V2DF_INT:
26874 nargs = 3;
26875 nargs_constant = 1;
26876 break;
26877 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
26878 nargs = 3;
26879 rmode = V1TImode;
26880 nargs_constant = 1;
26881 break;
26882 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
26883 nargs = 3;
26884 rmode = DImode;
26885 nargs_constant = 1;
26886 break;
26887 case V2DI_FTYPE_V2DI_UINT_UINT:
26888 nargs = 3;
26889 nargs_constant = 2;
26890 break;
26891 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
26892 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
26893 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
26894 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
26895 nargs = 4;
26896 nargs_constant = 1;
26897 break;
26898 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
26899 nargs = 4;
26900 nargs_constant = 2;
26901 break;
26902 default:
26903 gcc_unreachable ();
26906 gcc_assert (nargs <= ARRAY_SIZE (args));
26908 if (comparison != UNKNOWN)
26910 gcc_assert (nargs == 2);
26911 return ix86_expand_sse_compare (d, exp, target, swap);
26914 if (rmode == VOIDmode || rmode == tmode)
26915 {
26916 if (optimize
26917 || target == 0
26918 || GET_MODE (target) != tmode
26919 || !insn_p->operand[0].predicate (target, tmode))
26920 target = gen_reg_rtx (tmode);
26921 real_target = target;
26922 }
26923 else
26924 {
26925 target = gen_reg_rtx (rmode);
26926 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
26929 for (i = 0; i < nargs; i++)
26931 tree arg = CALL_EXPR_ARG (exp, i);
26932 rtx op = expand_normal (arg);
26933 enum machine_mode mode = insn_p->operand[i + 1].mode;
26934 bool match = insn_p->operand[i + 1].predicate (op, mode);
26936 if (last_arg_count && (i + 1) == nargs)
26938 /* SIMD shift insns take either an 8-bit immediate or a
26939 register as the count.  But builtin functions take int as
26940 the count.  If the count doesn't match, we put it in a register.  */
26943 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
26944 if (!insn_p->operand[i + 1].predicate (op, mode))
26945 op = copy_to_reg (op);
26948 else if ((nargs - i) <= nargs_constant)
26950 if (!match)
26951 switch (icode)
26952 {
26953 case CODE_FOR_sse4_1_roundpd:
26954 case CODE_FOR_sse4_1_roundps:
26955 case CODE_FOR_sse4_1_roundsd:
26956 case CODE_FOR_sse4_1_roundss:
26957 case CODE_FOR_sse4_1_blendps:
26958 case CODE_FOR_avx_blendpd256:
26959 case CODE_FOR_avx_vpermilv4df:
26960 case CODE_FOR_avx_roundpd256:
26961 case CODE_FOR_avx_roundps256:
26962 error ("the last argument must be a 4-bit immediate");
26965 case CODE_FOR_sse4_1_blendpd:
26966 case CODE_FOR_avx_vpermilv2df:
26967 case CODE_FOR_xop_vpermil2v2df3:
26968 case CODE_FOR_xop_vpermil2v4sf3:
26969 case CODE_FOR_xop_vpermil2v4df3:
26970 case CODE_FOR_xop_vpermil2v8sf3:
26971 error ("the last argument must be a 2-bit immediate");
26974 case CODE_FOR_avx_vextractf128v4df:
26975 case CODE_FOR_avx_vextractf128v8sf:
26976 case CODE_FOR_avx_vextractf128v8si:
26977 case CODE_FOR_avx_vinsertf128v4df:
26978 case CODE_FOR_avx_vinsertf128v8sf:
26979 case CODE_FOR_avx_vinsertf128v8si:
26980 error ("the last argument must be a 1-bit immediate");
26983 case CODE_FOR_avx_vmcmpv2df3:
26984 case CODE_FOR_avx_vmcmpv4sf3:
26985 case CODE_FOR_avx_cmpv2df3:
26986 case CODE_FOR_avx_cmpv4sf3:
26987 case CODE_FOR_avx_cmpv4df3:
26988 case CODE_FOR_avx_cmpv8sf3:
26989 error ("the last argument must be a 5-bit immediate");
26992 default:
26993 switch (nargs_constant)
26995 case 2:
26996 if ((nargs - i) == nargs_constant)
26998 error ("the next to last argument must be an 8-bit immediate");
27002 error ("the last argument must be an 8-bit immediate");
27004 default:
27005 gcc_unreachable ();
27012 if (VECTOR_MODE_P (mode))
27013 op = safe_vector_operand (op, mode);
27015 /* If we aren't optimizing, only allow one memory operand to
27016 be generated.  */
27017 if (memory_operand (op, mode))
27020 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
27022 if (optimize || !match || num_memory > 1)
27023 op = copy_to_mode_reg (mode, op);
27027 op = copy_to_reg (op);
27028 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
27033 args[i].mode = mode;
27039 pat = GEN_FCN (icode) (real_target, args[0].op);
27042 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
27045 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
27049 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
27050 args[2].op, args[3].op);
27051 break;
27052 default:
27053 gcc_unreachable ();
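/* Worked example (illustrative): __builtin_ia32_paddw128 carries the
   type V8HI_FTYPE_V8HI_V8HI with no comparison code, so it was handed
   to ix86_expand_binop_builtin near the top of this function; a shift
   builtin with a *_COUNT type instead reaches the loop above, where a
   variable count that fails the operand predicate is copied into a
   register.  */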
27063 /* Subroutine of ix86_expand_builtin to take care of special insns
27064 with variable number of operands. */
27066 static rtx
27067 ix86_expand_special_args_builtin (const struct builtin_description *d,
27068 tree exp, rtx target)
27072 unsigned int i, nargs, arg_adjust, memory;
27073 struct
27074 {
27075 rtx op;
27076 enum machine_mode mode;
27077 } args[3];
27078 enum insn_code icode = d->icode;
27079 bool last_arg_constant = false;
27080 const struct insn_data_d *insn_p = &insn_data[icode];
27081 enum machine_mode tmode = insn_p->operand[0].mode;
27082 enum { load, store } klass;
27084 switch ((enum ix86_builtin_func_type) d->flag)
27086 case VOID_FTYPE_VOID:
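/* For vzeroupper, the insn's single operand is the
   vzeroupper_intrinsic marker from the call_avx256_state enum, which
   appears to let the vzeroupper optimization pass tell intrinsic uses
   apart from compiler-inserted ones.  */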
27087 if (icode == CODE_FOR_avx_vzeroupper)
27088 target = GEN_INT (vzeroupper_intrinsic);
27089 emit_insn (GEN_FCN (icode) (target));
27090 return 0;
27091 case VOID_FTYPE_UINT64:
27092 case VOID_FTYPE_UNSIGNED:
27093 nargs = 0;
27094 klass = store;
27095 memory = 0;
27096 break;
27098 case UINT64_FTYPE_VOID:
27099 case UNSIGNED_FTYPE_VOID:
27100 nargs = 0;
27101 klass = load;
27102 memory = 0;
27103 break;
27104 case UINT64_FTYPE_PUNSIGNED:
27105 case V2DI_FTYPE_PV2DI:
27106 case V32QI_FTYPE_PCCHAR:
27107 case V16QI_FTYPE_PCCHAR:
27108 case V8SF_FTYPE_PCV4SF:
27109 case V8SF_FTYPE_PCFLOAT:
27110 case V4SF_FTYPE_PCFLOAT:
27111 case V4DF_FTYPE_PCV2DF:
27112 case V4DF_FTYPE_PCDOUBLE:
27113 case V2DF_FTYPE_PCDOUBLE:
27114 case VOID_FTYPE_PVOID:
27115 nargs = 1;
27116 klass = load;
27117 memory = 0;
27118 break;
27119 case VOID_FTYPE_PV2SF_V4SF:
27120 case VOID_FTYPE_PV4DI_V4DI:
27121 case VOID_FTYPE_PV2DI_V2DI:
27122 case VOID_FTYPE_PCHAR_V32QI:
27123 case VOID_FTYPE_PCHAR_V16QI:
27124 case VOID_FTYPE_PFLOAT_V8SF:
27125 case VOID_FTYPE_PFLOAT_V4SF:
27126 case VOID_FTYPE_PDOUBLE_V4DF:
27127 case VOID_FTYPE_PDOUBLE_V2DF:
27128 case VOID_FTYPE_PULONGLONG_ULONGLONG:
27129 case VOID_FTYPE_PINT_INT:
27130 nargs = 1;
27131 klass = store;
27132 /* Reserve memory operand for target.  */
27133 memory = ARRAY_SIZE (args);
27134 break;
27135 case V4SF_FTYPE_V4SF_PCV2SF:
27136 case V2DF_FTYPE_V2DF_PCDOUBLE:
27137 nargs = 2;
27138 klass = load;
27139 memory = 1;
27140 break;
27141 case V8SF_FTYPE_PCV8SF_V8SI:
27142 case V4DF_FTYPE_PCV4DF_V4DI:
27143 case V4SF_FTYPE_PCV4SF_V4SI:
27144 case V2DF_FTYPE_PCV2DF_V2DI:
27145 nargs = 2;
27146 klass = load;
27147 memory = 0;
27148 break;
27149 case VOID_FTYPE_PV8SF_V8SI_V8SF:
27150 case VOID_FTYPE_PV4DF_V4DI_V4DF:
27151 case VOID_FTYPE_PV4SF_V4SI_V4SF:
27152 case VOID_FTYPE_PV2DF_V2DI_V2DF:
27153 nargs = 2;
27154 klass = store;
27155 /* Reserve memory operand for target.  */
27156 memory = ARRAY_SIZE (args);
27157 break;
27158 case VOID_FTYPE_UINT_UINT_UINT:
27159 case VOID_FTYPE_UINT64_UINT_UINT:
27160 case UCHAR_FTYPE_UINT_UINT_UINT:
27161 case UCHAR_FTYPE_UINT64_UINT_UINT:
27162 nargs = 3;
27163 klass = load;
27164 memory = ARRAY_SIZE (args);
27165 last_arg_constant = true;
27166 break;
27167 default:
27168 gcc_unreachable ();
27171 gcc_assert (nargs <= ARRAY_SIZE (args));
27173 if (klass == store)
27175 arg = CALL_EXPR_ARG (exp, 0);
27176 op = expand_normal (arg);
27177 gcc_assert (target == 0);
27178 if (memory)
27179 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
27180 else
27181 target = force_reg (tmode, op);
27182 arg_adjust = 1;
27183 }
27184 else
27185 {
27186 arg_adjust = 0;
27188 if (optimize || !target
27189 || GET_MODE (target) != tmode
27190 || !insn_p->operand[0].predicate (target, tmode))
27191 target = gen_reg_rtx (tmode);
27194 for (i = 0; i < nargs; i++)
27196 enum machine_mode mode = insn_p->operand[i + 1].mode;
27199 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
27200 op = expand_normal (arg);
27201 match = insn_p->operand[i + 1].predicate (op, mode);
27203 if (last_arg_constant && (i + 1) == nargs)
27207 if (icode == CODE_FOR_lwp_lwpvalsi3
27208 || icode == CODE_FOR_lwp_lwpinssi3
27209 || icode == CODE_FOR_lwp_lwpvaldi3
27210 || icode == CODE_FOR_lwp_lwpinsdi3)
27211 error ("the last argument must be a 32-bit immediate");
27213 error ("the last argument must be an 8-bit immediate");
27221 /* This must be the memory operand. */
27222 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
27223 gcc_assert (GET_MODE (op) == mode
27224 || GET_MODE (op) == VOIDmode);
27228 /* This must be a register.  */
27229 if (VECTOR_MODE_P (mode))
27230 op = safe_vector_operand (op, mode);
27232 gcc_assert (GET_MODE (op) == mode
27233 || GET_MODE (op) == VOIDmode);
27234 op = copy_to_mode_reg (mode, op);
}
}

args[i].op = op;
27239 args[i].mode = mode;
}

switch (nargs)
{
case 0:
27245 pat = GEN_FCN (icode) (target);
break;
case 1:
27248 pat = GEN_FCN (icode) (target, args[0].op);
break;
case 2:
27251 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
break;
case 3:
27254 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
break;
default:
27257 gcc_unreachable ();
}

if (! pat)
return 0;
emit_insn (pat);
27263 return klass == store ? 0 : target;
}
27266 /* Return the integer constant in ARG. Constrain it to be in the range
27267 of the subparts of VEC_TYPE; issue an error if not. */
static unsigned
27270 get_element_number (tree vec_type, tree arg)
{
27272 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

27274 if (!host_integerp (arg, 1)
27275 || (elt = tree_low_cst (arg, 1), elt > max))
{
27277 error ("selector must be an integer constant in the range 0..%wi", max);
return 0;
}

return elt;
}
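/* A minimal sketch (not compiled here) of the same selector check in
   freestanding C; `nunits' stands in for TYPE_VECTOR_SUBPARTS and the
   helper name is hypothetical.  */
#if 0
#include <stdio.h>

static int
check_selector (unsigned long long elt, unsigned int nunits)
{
  unsigned long long max = nunits - 1;
  if (elt > max)
    {
      fprintf (stderr,
               "selector must be an integer constant in the range 0..%llu\n",
               max);
      return 0;   /* mirrors get_element_number's error path */
    }
  return 1;
}
#endif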
27284 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27285 ix86_expand_vector_init. We DO have language-level syntax for this, in
27286 the form of (type){ init-list }. Except that since we can't place emms
27287 instructions from inside the compiler, we can't allow the use of MMX
27288 registers unless the user explicitly asks for it. So we do *not* define
27289 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
27290 we have builtins invoked by mmintrin.h that give us license to emit
27291 these sorts of instructions. */
static rtx
27294 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
27296 enum machine_mode tmode = TYPE_MODE (type);
27297 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
27298 int i, n_elt = GET_MODE_NUNITS (tmode);
27299 rtvec v = rtvec_alloc (n_elt);
27301 gcc_assert (VECTOR_MODE_P (tmode));
27302 gcc_assert (call_expr_nargs (exp) == n_elt);
27304 for (i = 0; i < n_elt; ++i)
{
27306 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
27307 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
}
27310 if (!target || !register_operand (target, tmode))
27311 target = gen_reg_rtx (tmode);
27313 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));

return target;
}
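/* Illustrative sketch (not compiled here): a user-level view of the path
   above -- in GCC's mmintrin.h, _mm_set_pi32 expands to
   __builtin_ia32_vec_init_v2si, which lands in
   ix86_expand_vec_init_builtin.  Requires -mmmx.  */
#if 0
#include <mmintrin.h>

__m64
make_pair (int hi, int lo)
{
  return _mm_set_pi32 (hi, lo);	/* vec_init of a V2SI */
}
#endif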
27317 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27318 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
27319 had a language-level syntax for referencing vector elements. */
static rtx
27322 ix86_expand_vec_ext_builtin (tree exp, rtx target)
{
27324 enum machine_mode tmode, mode0;
tree arg0, arg1;
int elt;
rtx op0;
27329 arg0 = CALL_EXPR_ARG (exp, 0);
27330 arg1 = CALL_EXPR_ARG (exp, 1);
27332 op0 = expand_normal (arg0);
27333 elt = get_element_number (TREE_TYPE (arg0), arg1);
27335 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
27336 mode0 = TYPE_MODE (TREE_TYPE (arg0));
27337 gcc_assert (VECTOR_MODE_P (mode0));
27339 op0 = force_reg (mode0, op0);
27341 if (optimize || !target || !register_operand (target, tmode))
27342 target = gen_reg_rtx (tmode);
27344 ix86_expand_vector_extract (true, target, op0, elt);

return target;
}
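/* Illustrative sketch (not compiled here): this expander is reached from
   intrinsics such as _mm_extract_epi16 in emmintrin.h (SSE2); the
   constant selector is validated by get_element_number above.  */
#if 0
#include <emmintrin.h>

int
third_halfword (__m128i v)
{
  return _mm_extract_epi16 (v, 2);	/* vec_ext of a V8HI, element 2 */
}
#endif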
27349 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
27350 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
27351 a language-level syntax for referencing vector elements. */
static rtx
27354 ix86_expand_vec_set_builtin (tree exp)
{
27356 enum machine_mode tmode, mode1;
27357 tree arg0, arg1, arg2;
int elt;
27359 rtx op0, op1, target;
27361 arg0 = CALL_EXPR_ARG (exp, 0);
27362 arg1 = CALL_EXPR_ARG (exp, 1);
27363 arg2 = CALL_EXPR_ARG (exp, 2);
27365 tmode = TYPE_MODE (TREE_TYPE (arg0));
27366 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
27367 gcc_assert (VECTOR_MODE_P (tmode));
27369 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
27370 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
27371 elt = get_element_number (TREE_TYPE (arg0), arg2);
27373 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
27374 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
27376 op0 = force_reg (tmode, op0);
27377 op1 = force_reg (mode1, op1);
27379 /* OP0 is the source of these builtin functions and shouldn't be
27380 modified. Create a copy, use it and return it as target. */
27381 target = gen_reg_rtx (tmode);
27382 emit_move_insn (target, op0);
27383 ix86_expand_vector_set (true, target, op1, elt);

return target;
}
27388 /* Expand an expression EXP that calls a built-in function,
27389 with result going to TARGET if that's convenient
27390 (and in mode MODE if that's convenient).
27391 SUBTARGET may be used as the target for computing one of EXP's operands.
27392 IGNORE is nonzero if the value is to be ignored. */
static rtx
27395 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
27396 enum machine_mode mode ATTRIBUTE_UNUSED,
27397 int ignore ATTRIBUTE_UNUSED)
{
27399 const struct builtin_description *d;
size_t i;
27401 enum insn_code icode;
27402 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
27403 tree arg0, arg1, arg2;
27404 rtx op0, op1, op2, pat;
27405 enum machine_mode mode0, mode1, mode2;
27406 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
27408 /* Determine whether the builtin function is available under the current ISA.
27409 Originally the builtin was not created if it wasn't applicable to the
27410 current ISA based on the command line switches. With function specific
27411 options, we need to check in the context of the function making the call
27412 whether it is supported. */
27413 if (ix86_builtins_isa[fcode].isa
27414 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
{
27416 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
27417 NULL, NULL, false);

if (!opts)
27420 error ("%qE needs unknown isa option", fndecl);
else
{
27423 gcc_assert (opts != NULL);
27424 error ("%qE needs isa option %s", fndecl, opts);
free (opts);
}
return const0_rtx;
}

switch (fcode)
{
27432 case IX86_BUILTIN_MASKMOVQ:
27433 case IX86_BUILTIN_MASKMOVDQU:
27434 icode = (fcode == IX86_BUILTIN_MASKMOVQ
27435 ? CODE_FOR_mmx_maskmovq
27436 : CODE_FOR_sse2_maskmovdqu);
27437 /* Note the arg order is different from the operand order. */
27438 arg1 = CALL_EXPR_ARG (exp, 0);
27439 arg2 = CALL_EXPR_ARG (exp, 1);
27440 arg0 = CALL_EXPR_ARG (exp, 2);
27441 op0 = expand_normal (arg0);
27442 op1 = expand_normal (arg1);
27443 op2 = expand_normal (arg2);
27444 mode0 = insn_data[icode].operand[0].mode;
27445 mode1 = insn_data[icode].operand[1].mode;
27446 mode2 = insn_data[icode].operand[2].mode;
27448 op0 = force_reg (Pmode, op0);
27449 op0 = gen_rtx_MEM (mode1, op0);
27451 if (!insn_data[icode].operand[0].predicate (op0, mode0))
27452 op0 = copy_to_mode_reg (mode0, op0);
27453 if (!insn_data[icode].operand[1].predicate (op1, mode1))
27454 op1 = copy_to_mode_reg (mode1, op1);
27455 if (!insn_data[icode].operand[2].predicate (op2, mode2))
27456 op2 = copy_to_mode_reg (mode2, op2);
27457 pat = GEN_FCN (icode) (op0, op1, op2);
if (! pat)
return 0;
emit_insn (pat);
return 0;
27463 case IX86_BUILTIN_LDMXCSR:
27464 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
27465 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27466 emit_move_insn (target, op0);
27467 emit_insn (gen_sse_ldmxcsr (target));
return 0;
27470 case IX86_BUILTIN_STMXCSR:
27471 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27472 emit_insn (gen_sse_stmxcsr (target));
27473 return copy_to_mode_reg (SImode, target);
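/* Illustrative sketch (not compiled here): user code reaches the two
   MXCSR cases above through _mm_getcsr/_mm_setcsr from xmmintrin.h.
   Bit 15 of MXCSR is flush-to-zero; the constant is only an
   illustrative assumption.  */
#if 0
#include <xmmintrin.h>

void
enable_ftz (void)
{
  unsigned int csr = _mm_getcsr ();	/* IX86_BUILTIN_STMXCSR */
  _mm_setcsr (csr | 0x8000);		/* IX86_BUILTIN_LDMXCSR */
}
#endif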
27475 case IX86_BUILTIN_CLFLUSH:
27476 arg0 = CALL_EXPR_ARG (exp, 0);
27477 op0 = expand_normal (arg0);
27478 icode = CODE_FOR_sse2_clflush;
27479 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27480 op0 = copy_to_mode_reg (Pmode, op0);
27482 emit_insn (gen_sse2_clflush (op0));
return 0;
27485 case IX86_BUILTIN_MONITOR:
27486 arg0 = CALL_EXPR_ARG (exp, 0);
27487 arg1 = CALL_EXPR_ARG (exp, 1);
27488 arg2 = CALL_EXPR_ARG (exp, 2);
27489 op0 = expand_normal (arg0);
27490 op1 = expand_normal (arg1);
27491 op2 = expand_normal (arg2);
if (!REG_P (op0))
27493 op0 = copy_to_mode_reg (Pmode, op0);
if (!REG_P (op1))
27495 op1 = copy_to_mode_reg (SImode, op1);
if (!REG_P (op2))
27497 op2 = copy_to_mode_reg (SImode, op2);
27498 emit_insn (ix86_gen_monitor (op0, op1, op2));
return 0;
27501 case IX86_BUILTIN_MWAIT:
27502 arg0 = CALL_EXPR_ARG (exp, 0);
27503 arg1 = CALL_EXPR_ARG (exp, 1);
27504 op0 = expand_normal (arg0);
27505 op1 = expand_normal (arg1);
if (!REG_P (op0))
27507 op0 = copy_to_mode_reg (SImode, op0);
if (!REG_P (op1))
27509 op1 = copy_to_mode_reg (SImode, op1);
27510 emit_insn (gen_sse3_mwait (op0, op1));
return 0;
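/* Illustrative sketch (not compiled here): the SSE3 intrinsics in
   pmmintrin.h (-msse3) map onto the two cases above.  MONITOR/MWAIT are
   privileged on typical OSes, so this only shows the shape of the
   expansion, not portable user code.  */
#if 0
#include <pmmintrin.h>

void
wait_on (volatile int *flag)
{
  _mm_monitor ((void const *) flag, 0, 0);	/* IX86_BUILTIN_MONITOR */
  if (!*flag)
    _mm_mwait (0, 0);				/* IX86_BUILTIN_MWAIT */
}
#endif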
27513 case IX86_BUILTIN_VEC_INIT_V2SI:
27514 case IX86_BUILTIN_VEC_INIT_V4HI:
27515 case IX86_BUILTIN_VEC_INIT_V8QI:
27516 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
27518 case IX86_BUILTIN_VEC_EXT_V2DF:
27519 case IX86_BUILTIN_VEC_EXT_V2DI:
27520 case IX86_BUILTIN_VEC_EXT_V4SF:
27521 case IX86_BUILTIN_VEC_EXT_V4SI:
27522 case IX86_BUILTIN_VEC_EXT_V8HI:
27523 case IX86_BUILTIN_VEC_EXT_V2SI:
27524 case IX86_BUILTIN_VEC_EXT_V4HI:
27525 case IX86_BUILTIN_VEC_EXT_V16QI:
27526 return ix86_expand_vec_ext_builtin (exp, target);
27528 case IX86_BUILTIN_VEC_SET_V2DI:
27529 case IX86_BUILTIN_VEC_SET_V4SF:
27530 case IX86_BUILTIN_VEC_SET_V4SI:
27531 case IX86_BUILTIN_VEC_SET_V8HI:
27532 case IX86_BUILTIN_VEC_SET_V4HI:
27533 case IX86_BUILTIN_VEC_SET_V16QI:
27534 return ix86_expand_vec_set_builtin (exp);
27536 case IX86_BUILTIN_VEC_PERM_V2DF:
27537 case IX86_BUILTIN_VEC_PERM_V4SF:
27538 case IX86_BUILTIN_VEC_PERM_V2DI:
27539 case IX86_BUILTIN_VEC_PERM_V4SI:
27540 case IX86_BUILTIN_VEC_PERM_V8HI:
27541 case IX86_BUILTIN_VEC_PERM_V16QI:
27542 case IX86_BUILTIN_VEC_PERM_V2DI_U:
27543 case IX86_BUILTIN_VEC_PERM_V4SI_U:
27544 case IX86_BUILTIN_VEC_PERM_V8HI_U:
27545 case IX86_BUILTIN_VEC_PERM_V16QI_U:
27546 case IX86_BUILTIN_VEC_PERM_V4DF:
27547 case IX86_BUILTIN_VEC_PERM_V8SF:
27548 return ix86_expand_vec_perm_builtin (exp);
27550 case IX86_BUILTIN_INFQ:
27551 case IX86_BUILTIN_HUGE_VALQ:
{
27553 REAL_VALUE_TYPE inf;
rtx tmp;

real_inf (&inf);
27557 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);

27559 tmp = validize_mem (force_const_mem (mode, tmp));

if (target == 0)
27562 target = gen_reg_rtx (mode);

27564 emit_move_insn (target, tmp);
return target;
}
27568 case IX86_BUILTIN_LLWPCB:
27569 arg0 = CALL_EXPR_ARG (exp, 0);
27570 op0 = expand_normal (arg0);
27571 icode = CODE_FOR_lwp_llwpcb;
27572 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27573 op0 = copy_to_mode_reg (Pmode, op0);
27574 emit_insn (gen_lwp_llwpcb (op0));
return 0;
27577 case IX86_BUILTIN_SLWPCB:
27578 icode = CODE_FOR_lwp_slwpcb;
if (!target
27580 || !insn_data[icode].operand[0].predicate (target, Pmode))
27581 target = gen_reg_rtx (Pmode);
27582 emit_insn (gen_lwp_slwpcb (target));
return target;
27585 case IX86_BUILTIN_BEXTRI32:
27586 case IX86_BUILTIN_BEXTRI64:
27587 arg0 = CALL_EXPR_ARG (exp, 0);
27588 arg1 = CALL_EXPR_ARG (exp, 1);
27589 op0 = expand_normal (arg0);
27590 op1 = expand_normal (arg1);
27591 icode = (fcode == IX86_BUILTIN_BEXTRI32
27592 ? CODE_FOR_tbm_bextri_si
27593 : CODE_FOR_tbm_bextri_di);
27594 if (!CONST_INT_P (op1))
{
27596 error ("last argument must be an immediate");
return const0_rtx;
}
else
{
27601 unsigned char length = (INTVAL (op1) >> 8) & 0xFF;
27602 unsigned char lsb_index = INTVAL (op1) & 0xFF;
27603 op1 = GEN_INT (length);
27604 op2 = GEN_INT (lsb_index);
27605 pat = GEN_FCN (icode) (target, op0, op1, op2);
if (pat)
emit_insn (pat);
return target;
}
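/* Worked example (sketch, not compiled here): the TBM BEXTRI control
   word decoded above packs the field length in bits 15:8 and the
   starting bit in bits 7:0.  */
#if 0
unsigned int
bextri_control (unsigned char length, unsigned char lsb_index)
{
  return ((unsigned int) length << 8) | lsb_index;
}
/* e.g. bextri_control (8, 4) == 0x0804 extracts bits 11:4.  */
#endif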
27611 case IX86_BUILTIN_RDRAND16_STEP:
27612 icode = CODE_FOR_rdrandhi_1;
mode0 = HImode;
goto rdrand_step;
27616 case IX86_BUILTIN_RDRAND32_STEP:
27617 icode = CODE_FOR_rdrandsi_1;
mode0 = SImode;
goto rdrand_step;
27621 case IX86_BUILTIN_RDRAND64_STEP:
27622 icode = CODE_FOR_rdranddi_1;
mode0 = DImode;

rdrand_step:
27626 op0 = gen_reg_rtx (mode0);
27627 emit_insn (GEN_FCN (icode) (op0));
27629 op1 = gen_reg_rtx (SImode);
27630 emit_move_insn (op1, CONST1_RTX (SImode));
27632 /* Emit SImode conditional move.  RDRAND clears CF and zeroes the
destination register on failure, so the conditional move below leaves
1 (success) or 0 (failure) in OP1. */
27633 if (mode0 == HImode)
27635 op2 = gen_reg_rtx (SImode);
27636 emit_insn (gen_zero_extendhisi2 (op2, op0));
}
27638 else if (mode0 == SImode)
op2 = op0;
else
27641 op2 = gen_rtx_SUBREG (SImode, op0, 0);

27643 pat = gen_rtx_GEU (VOIDmode, gen_rtx_REG (CCCmode, FLAGS_REG),
const0_rtx);
27645 emit_insn (gen_rtx_SET (VOIDmode, op1,
27646 gen_rtx_IF_THEN_ELSE (SImode, pat, op2, op1)));
27647 emit_move_insn (target, op1);
27649 arg0 = CALL_EXPR_ARG (exp, 0);
27650 op1 = expand_normal (arg0);
27651 if (!address_operand (op1, VOIDmode))
27652 op1 = copy_addr_to_reg (op1);
27653 emit_move_insn (gen_rtx_MEM (mode0, op1), op0);
return target;

default:
break;
}
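/* Illustrative sketch (not compiled here): user-level shape of the
   RDRAND sequence above, via the immintrin.h intrinsic (-mrdrnd).
   Because failure zeroes the destination and clears CF, the conditional
   move yields 0 there and the initial 1 on success.  */
#if 0
#include <immintrin.h>

int
get_random (unsigned int *out)
{
  return _rdrand32_step (out);	/* 1 on success, 0 on failure */
}
#endif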
27660 for (i = 0, d = bdesc_special_args;
27661 i < ARRAY_SIZE (bdesc_special_args);
i++, d++)
27663 if (d->code == fcode)
27664 return ix86_expand_special_args_builtin (d, exp, target);
27666 for (i = 0, d = bdesc_args;
27667 i < ARRAY_SIZE (bdesc_args);
i++, d++)
27669 if (d->code == fcode)
switch (fcode)
{
27672 case IX86_BUILTIN_FABSQ:
27673 case IX86_BUILTIN_COPYSIGNQ:
if (!TARGET_SSE2)
27675 /* Emit a normal call if SSE2 isn't available. */
27676 return expand_call (exp, target, ignore);
default:
27678 return ix86_expand_args_builtin (d, exp, target);
}
27681 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
27682 if (d->code == fcode)
27683 return ix86_expand_sse_comi (d, exp, target);
27685 for (i = 0, d = bdesc_pcmpestr;
27686 i < ARRAY_SIZE (bdesc_pcmpestr);
i++, d++)
27688 if (d->code == fcode)
27689 return ix86_expand_sse_pcmpestr (d, exp, target);
27691 for (i = 0, d = bdesc_pcmpistr;
27692 i < ARRAY_SIZE (bdesc_pcmpistr);
i++, d++)
27694 if (d->code == fcode)
27695 return ix86_expand_sse_pcmpistr (d, exp, target);
27697 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
27698 if (d->code == fcode)
27699 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
27700 (enum ix86_builtin_func_type)
27701 d->flag, d->comparison);
27703 gcc_unreachable ();
}
27706 /* Returns a function decl for a vectorized version of the builtin function
27707 with builtin function code FN and the result vector type TYPE, or NULL_TREE
27708 if it is not available. */
static tree
27711 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
tree type_in)
{
27714 enum machine_mode in_mode, out_mode;
int in_n, out_n;
27716 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
27718 if (TREE_CODE (type_out) != VECTOR_TYPE
27719 || TREE_CODE (type_in) != VECTOR_TYPE
27720 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
return NULL_TREE;
27723 out_mode = TYPE_MODE (TREE_TYPE (type_out));
27724 out_n = TYPE_VECTOR_SUBPARTS (type_out);
27725 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27726 in_n = TYPE_VECTOR_SUBPARTS (type_in);
switch (fn)
{
27730 case BUILT_IN_SQRT:
27731 if (out_mode == DFmode && in_mode == DFmode)
{
27733 if (out_n == 2 && in_n == 2)
27734 return ix86_builtins[IX86_BUILTIN_SQRTPD];
27735 else if (out_n == 4 && in_n == 4)
27736 return ix86_builtins[IX86_BUILTIN_SQRTPD256];
}
break;

27740 case BUILT_IN_SQRTF:
27741 if (out_mode == SFmode && in_mode == SFmode)
{
27743 if (out_n == 4 && in_n == 4)
27744 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
27745 else if (out_n == 8 && in_n == 8)
27746 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR256];
}
break;

27750 case BUILT_IN_LRINT:
27751 if (out_mode == SImode && out_n == 4
27752 && in_mode == DFmode && in_n == 2)
27753 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
break;

27756 case BUILT_IN_LRINTF:
27757 if (out_mode == SImode && in_mode == SFmode)
{
27759 if (out_n == 4 && in_n == 4)
27760 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
27761 else if (out_n == 8 && in_n == 8)
27762 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ256];
}
break;

27766 case BUILT_IN_COPYSIGN:
27767 if (out_mode == DFmode && in_mode == DFmode)
{
27769 if (out_n == 2 && in_n == 2)
27770 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
27771 else if (out_n == 4 && in_n == 4)
27772 return ix86_builtins[IX86_BUILTIN_CPYSGNPD256];
}
break;

27776 case BUILT_IN_COPYSIGNF:
27777 if (out_mode == SFmode && in_mode == SFmode)
{
27779 if (out_n == 4 && in_n == 4)
27780 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
27781 else if (out_n == 8 && in_n == 8)
27782 return ix86_builtins[IX86_BUILTIN_CPYSGNPS256];
}
break;
27786 case BUILT_IN_FLOOR:
27787 /* The round insn does not trap on denormals. */
27788 if (flag_trapping_math || !TARGET_ROUND)
break;

27791 if (out_mode == DFmode && in_mode == DFmode)
{
27793 if (out_n == 2 && in_n == 2)
27794 return ix86_builtins[IX86_BUILTIN_FLOORPD];
27795 else if (out_n == 4 && in_n == 4)
27796 return ix86_builtins[IX86_BUILTIN_FLOORPD256];
}
break;

27800 case BUILT_IN_FLOORF:
27801 /* The round insn does not trap on denormals. */
27802 if (flag_trapping_math || !TARGET_ROUND)
break;

27805 if (out_mode == SFmode && in_mode == SFmode)
{
27807 if (out_n == 4 && in_n == 4)
27808 return ix86_builtins[IX86_BUILTIN_FLOORPS];
27809 else if (out_n == 8 && in_n == 8)
27810 return ix86_builtins[IX86_BUILTIN_FLOORPS256];
}
break;

27814 case BUILT_IN_CEIL:
27815 /* The round insn does not trap on denormals. */
27816 if (flag_trapping_math || !TARGET_ROUND)
break;

27819 if (out_mode == DFmode && in_mode == DFmode)
{
27821 if (out_n == 2 && in_n == 2)
27822 return ix86_builtins[IX86_BUILTIN_CEILPD];
27823 else if (out_n == 4 && in_n == 4)
27824 return ix86_builtins[IX86_BUILTIN_CEILPD256];
}
break;

27828 case BUILT_IN_CEILF:
27829 /* The round insn does not trap on denormals. */
27830 if (flag_trapping_math || !TARGET_ROUND)
break;

27833 if (out_mode == SFmode && in_mode == SFmode)
{
27835 if (out_n == 4 && in_n == 4)
27836 return ix86_builtins[IX86_BUILTIN_CEILPS];
27837 else if (out_n == 8 && in_n == 8)
27838 return ix86_builtins[IX86_BUILTIN_CEILPS256];
}
break;

27842 case BUILT_IN_TRUNC:
27843 /* The round insn does not trap on denormals. */
27844 if (flag_trapping_math || !TARGET_ROUND)
break;

27847 if (out_mode == DFmode && in_mode == DFmode)
{
27849 if (out_n == 2 && in_n == 2)
27850 return ix86_builtins[IX86_BUILTIN_TRUNCPD];
27851 else if (out_n == 4 && in_n == 4)
27852 return ix86_builtins[IX86_BUILTIN_TRUNCPD256];
}
break;

27856 case BUILT_IN_TRUNCF:
27857 /* The round insn does not trap on denormals. */
27858 if (flag_trapping_math || !TARGET_ROUND)
break;

27861 if (out_mode == SFmode && in_mode == SFmode)
{
27863 if (out_n == 4 && in_n == 4)
27864 return ix86_builtins[IX86_BUILTIN_TRUNCPS];
27865 else if (out_n == 8 && in_n == 8)
27866 return ix86_builtins[IX86_BUILTIN_TRUNCPS256];
}
break;

27870 case BUILT_IN_RINT:
27871 /* The round insn does not trap on denormals. */
27872 if (flag_trapping_math || !TARGET_ROUND)
break;

27875 if (out_mode == DFmode && in_mode == DFmode)
{
27877 if (out_n == 2 && in_n == 2)
27878 return ix86_builtins[IX86_BUILTIN_RINTPD];
27879 else if (out_n == 4 && in_n == 4)
27880 return ix86_builtins[IX86_BUILTIN_RINTPD256];
}
break;

27884 case BUILT_IN_RINTF:
27885 /* The round insn does not trap on denormals. */
27886 if (flag_trapping_math || !TARGET_ROUND)
break;

27889 if (out_mode == SFmode && in_mode == SFmode)
{
27891 if (out_n == 4 && in_n == 4)
27892 return ix86_builtins[IX86_BUILTIN_RINTPS];
27893 else if (out_n == 8 && in_n == 8)
27894 return ix86_builtins[IX86_BUILTIN_RINTPS256];
}
break;

case BUILT_IN_FMA:
27899 if (out_mode == DFmode && in_mode == DFmode)
{
27901 if (out_n == 2 && in_n == 2)
27902 return ix86_builtins[IX86_BUILTIN_VFMADDPD];
27903 if (out_n == 4 && in_n == 4)
27904 return ix86_builtins[IX86_BUILTIN_VFMADDPD256];
}
break;

27908 case BUILT_IN_FMAF:
27909 if (out_mode == SFmode && in_mode == SFmode)
{
27911 if (out_n == 4 && in_n == 4)
27912 return ix86_builtins[IX86_BUILTIN_VFMADDPS];
27913 if (out_n == 8 && in_n == 8)
27914 return ix86_builtins[IX86_BUILTIN_VFMADDPS256];
}
break;

default:
break;
}
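/* Illustrative sketch (not compiled here): the kind of source loop that
   makes the vectorizer query this hook for BUILT_IN_SQRT and receive
   IX86_BUILTIN_SQRTPD (SSE2) or IX86_BUILTIN_SQRTPD256 (AVX); compile
   with e.g. -O3 -ffast-math.  */
#if 0
#include <math.h>

void
vec_sqrt (double *restrict out, const double *restrict in, int n)
{
  int i;
  for (i = 0; i < n; i++)
    out[i] = sqrt (in[i]);	/* becomes sqrtpd/vsqrtpd */
}
#endif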
27922 /* Dispatch to a handler for a vectorization library. */
27923 if (ix86_veclib_handler)
27924 return ix86_veclib_handler ((enum built_in_function) fn, type_out,
type_in);

return NULL_TREE;
}
27930 /* Handler for an SVML-style interface to
27931 a library with vectorized intrinsics. */
static tree
27934 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
{
char name[30];
27937 tree fntype, new_fndecl, args;
unsigned arity;
const char *bname;
27940 enum machine_mode el_mode, in_mode;
int n, in_n;
27943 /* The SVML is suitable for unsafe math only. */
27944 if (!flag_unsafe_math_optimizations)
return NULL_TREE;
27947 el_mode = TYPE_MODE (TREE_TYPE (type_out));
27948 n = TYPE_VECTOR_SUBPARTS (type_out);
27949 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27950 in_n = TYPE_VECTOR_SUBPARTS (type_in);
27951 if (el_mode != in_mode
|| n != in_n)
return NULL_TREE;

switch (fn)
{
case BUILT_IN_EXP:
case BUILT_IN_LOG:
27959 case BUILT_IN_LOG10:
case BUILT_IN_POW:
27961 case BUILT_IN_TANH:
case BUILT_IN_TAN:
27963 case BUILT_IN_ATAN:
27964 case BUILT_IN_ATAN2:
27965 case BUILT_IN_ATANH:
27966 case BUILT_IN_CBRT:
27967 case BUILT_IN_SINH:
case BUILT_IN_SIN:
27969 case BUILT_IN_ASINH:
27970 case BUILT_IN_ASIN:
27971 case BUILT_IN_COSH:
case BUILT_IN_COS:
27973 case BUILT_IN_ACOSH:
27974 case BUILT_IN_ACOS:
27975 if (el_mode != DFmode || n != 2)
return NULL_TREE;
break;
27979 case BUILT_IN_EXPF:
27980 case BUILT_IN_LOGF:
27981 case BUILT_IN_LOG10F:
27982 case BUILT_IN_POWF:
27983 case BUILT_IN_TANHF:
27984 case BUILT_IN_TANF:
27985 case BUILT_IN_ATANF:
27986 case BUILT_IN_ATAN2F:
27987 case BUILT_IN_ATANHF:
27988 case BUILT_IN_CBRTF:
27989 case BUILT_IN_SINHF:
27990 case BUILT_IN_SINF:
27991 case BUILT_IN_ASINHF:
27992 case BUILT_IN_ASINF:
27993 case BUILT_IN_COSHF:
27994 case BUILT_IN_COSF:
27995 case BUILT_IN_ACOSHF:
27996 case BUILT_IN_ACOSF:
27997 if (el_mode != SFmode || n != 4)
return NULL_TREE;
break;

default:
return NULL_TREE;
}
28005 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
28007 if (fn == BUILT_IN_LOGF)
28008 strcpy (name, "vmlsLn4");
28009 else if (fn == BUILT_IN_LOG)
28010 strcpy (name, "vmldLn2");
else if (n == 4)
{
28013 sprintf (name, "vmls%s", bname+10);
28014 name[strlen (name)-1] = '4';
}
else
28017 sprintf (name, "vmld%s2", bname+10);

28019 /* Convert to uppercase. */
name[4] &= ~0x20;
arity = 0;
28023 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
28024 args = TREE_CHAIN (args))
arity++;

if (arity == 1)
28028 fntype = build_function_type_list (type_out, type_in, NULL);
else
28030 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
28032 /* Build a function declaration for the vectorized function. */
28033 new_fndecl = build_decl (BUILTINS_LOCATION,
28034 FUNCTION_DECL, get_identifier (name), fntype);
28035 TREE_PUBLIC (new_fndecl) = 1;
28036 DECL_EXTERNAL (new_fndecl) = 1;
28037 DECL_IS_NOVOPS (new_fndecl) = 1;
28038 TREE_READONLY (new_fndecl) = 1;

return new_fndecl;
}
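/* Worked example (sketch, not compiled here) of the SVML mangling
   above: drop the "__builtin_" prefix (bname + 10), prepend
   "vmls"/"vmld", append the vector width, and upcase the first letter
   -- "sinf" becomes "vmlsSin4", "sin" becomes "vmldSin2".  The helper
   name is hypothetical.  */
#if 0
#include <stdio.h>
#include <string.h>

static void
svml_name (char name[30], const char *fn, int single)
{
  if (single)
    {
      sprintf (name, "vmls%s", fn);
      name[strlen (name) - 1] = '4';	/* drop the 'f', add the width */
    }
  else
    sprintf (name, "vmld%s2", fn);
  name[4] &= ~0x20;			/* upcase first letter of FN */
}
#endif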
28043 /* Handler for an ACML-style interface to
28044 a library with vectorized intrinsics. */
static tree
28047 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
{
28049 char name[20] = "__vr.._";
28050 tree fntype, new_fndecl, args;
unsigned arity;
const char *bname;
28053 enum machine_mode el_mode, in_mode;
int n, in_n;
28056 /* The ACML is 64bits only and suitable for unsafe math only as
28057 it does not correctly support parts of IEEE with the required
28058 precision such as denormals. */
if (!TARGET_64BIT
28060 || !flag_unsafe_math_optimizations)
return NULL_TREE;
28063 el_mode = TYPE_MODE (TREE_TYPE (type_out));
28064 n = TYPE_VECTOR_SUBPARTS (type_out);
28065 in_mode = TYPE_MODE (TREE_TYPE (type_in));
28066 in_n = TYPE_VECTOR_SUBPARTS (type_in);
28067 if (el_mode != in_mode
|| n != in_n)
return NULL_TREE;

switch (fn)
{
case BUILT_IN_SIN:
case BUILT_IN_COS:
case BUILT_IN_EXP:
case BUILT_IN_LOG:
28077 case BUILT_IN_LOG2:
28078 case BUILT_IN_LOG10:
name[4] = 'd';
name[5] = '2';
28081 if (el_mode != DFmode
|| n != 2)
return NULL_TREE;
break;
28086 case BUILT_IN_SINF:
28087 case BUILT_IN_COSF:
28088 case BUILT_IN_EXPF:
28089 case BUILT_IN_POWF:
28090 case BUILT_IN_LOGF:
28091 case BUILT_IN_LOG2F:
28092 case BUILT_IN_LOG10F:
name[4] = 's';
name[5] = '4';
28095 if (el_mode != SFmode
|| n != 4)
return NULL_TREE;
break;

default:
return NULL_TREE;
}
28104 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
28105 sprintf (name + 7, "%s", bname+10);
arity = 0;
28108 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
28109 args = TREE_CHAIN (args))
arity++;

if (arity == 1)
28113 fntype = build_function_type_list (type_out, type_in, NULL);
else
28115 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
28117 /* Build a function declaration for the vectorized function. */
28118 new_fndecl = build_decl (BUILTINS_LOCATION,
28119 FUNCTION_DECL, get_identifier (name), fntype);
28120 TREE_PUBLIC (new_fndecl) = 1;
28121 DECL_EXTERNAL (new_fndecl) = 1;
28122 DECL_IS_NOVOPS (new_fndecl) = 1;
28123 TREE_READONLY (new_fndecl) = 1;

return new_fndecl;
}
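/* Worked example (sketch, not compiled here) of the "__vr.._" template
   above: the two dots become element type and vector width, and the
   builtin name minus "__builtin_" (bname + 10) is appended -- "sin"
   becomes "__vrd2_sin", "sinf" becomes "__vrs4_sinf".  The helper name
   is hypothetical.  */
#if 0
#include <stdio.h>

static void
acml_name (char name[20], const char *fn, int single)
{
  sprintf (name, "__vr%c%c_%s",
           single ? 's' : 'd',	/* element type */
           single ? '4' : '2',	/* vector width */
           fn);
}
#endif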
28129 /* Returns a decl of a function that implements conversion of an integer vector
28130 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
28131 are the types involved when converting according to CODE.
28132 Return NULL_TREE if it is not available. */
static tree
28135 ix86_vectorize_builtin_conversion (unsigned int code,
28136 tree dest_type, tree src_type)
{
if (!TARGET_SSE2)
return NULL_TREE;

switch (code)
{
case FLOAT_EXPR:
28144 switch (TYPE_MODE (src_type))
{
case V4SImode:
28147 switch (TYPE_MODE (dest_type))
{
case V4SFmode:
28150 return (TYPE_UNSIGNED (src_type)
28151 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
28152 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
case V4DFmode:
28154 return (TYPE_UNSIGNED (src_type)
? NULL_TREE
28156 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
default:
return NULL_TREE;
}

case V8SImode:
28162 switch (TYPE_MODE (dest_type))
{
case V8SFmode:
28165 return (TYPE_UNSIGNED (src_type)
? NULL_TREE
28167 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS256]);
default:
return NULL_TREE;
}

default:
return NULL_TREE;
}

28176 case FIX_TRUNC_EXPR:
28177 switch (TYPE_MODE (dest_type))
{
case V4SImode:
28180 switch (TYPE_MODE (src_type))
{
case V4SFmode:
28183 return (TYPE_UNSIGNED (dest_type)
? NULL_TREE
28185 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
case V4DFmode:
28187 return (TYPE_UNSIGNED (dest_type)
? NULL_TREE
28189 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
default:
return NULL_TREE;
}

case V8SImode:
28196 switch (TYPE_MODE (src_type))
{
case V8SFmode:
28199 return (TYPE_UNSIGNED (dest_type)
? NULL_TREE
28201 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
default:
return NULL_TREE;
}

default:
return NULL_TREE;
}

default:
return NULL_TREE;
}
}
28218 /* Returns a code for a target-specific builtin that implements
28219 reciprocal of the function, or NULL_TREE if not available. */
static tree
28222 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
28223 bool sqrt ATTRIBUTE_UNUSED)
{
28225 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
28226 && flag_finite_math_only && !flag_trapping_math
28227 && flag_unsafe_math_optimizations))
return NULL_TREE;

if (md_fn)
28231 /* Machine dependent builtins. */
switch (fn)
{
28234 /* Vectorized version of sqrt to rsqrt conversion. */
28235 case IX86_BUILTIN_SQRTPS_NR:
28236 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
28238 case IX86_BUILTIN_SQRTPS_NR256:
28239 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR256];

default:
return NULL_TREE;
}
else
28245 /* Normal builtins. */
switch (fn)
{
28248 /* Sqrt to rsqrt conversion. */
28249 case BUILT_IN_SQRTF:
28250 return ix86_builtins[IX86_BUILTIN_RSQRTF];

default:
return NULL_TREE;
}
}
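/* Illustrative sketch (not compiled here): the kind of scalar source
   that triggers the sqrt-to-rsqrt replacement above, given the guard
   conditions the hook checks (e.g. -O2 -mrecip -ffast-math on an SSE
   target); the compiler emits rsqrtss plus a Newton-Raphson step.  */
#if 0
#include <math.h>

float
inv_norm (float x)
{
  return 1.0f / sqrtf (x);
}
#endif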
28257 /* Helper for avx_vpermilps256_operand et al. This is also used by
28258 the expansion functions to turn the parallel back into a mask.
28259 The return value is 0 for no match and the imm8+1 for a match. */
int
28262 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
{
28264 unsigned i, nelt = GET_MODE_NUNITS (mode);
unsigned mask = 0;
28266 unsigned char ipar[8];

28268 if (XVECLEN (par, 0) != (int) nelt)
return 0;
28271 /* Validate that all of the elements are constants, and not totally
28272 out of range. Copy the data into an integral array to make the
28273 subsequent checks easier. */
28274 for (i = 0; i < nelt; ++i)
28276 rtx er = XVECEXP (par, 0, i);
28277 unsigned HOST_WIDE_INT ei;
28279 if (!CONST_INT_P (er))
return 0;
ei = INTVAL (er);
if (ei >= nelt)
return 0;
ipar[i] = ei;
}

switch (mode)
{
case V4DFmode:
28290 /* In the 256-bit DFmode case, we can only move elements within
a 128-bit lane. */
28292 for (i = 0; i < 2; ++i)
{
if (ipar[i] >= 2)
return 0;
28296 mask |= ipar[i] << i;
}
28298 for (i = 2; i < 4; ++i)
{
if (ipar[i] < 2)
return 0;
28302 mask |= (ipar[i] - 2) << i;
}
break;

case V8SFmode:
28307 /* In the 256-bit SFmode case, we have full freedom of movement
28308 within the low 128-bit lane, but the high 128-bit lane must
28309 mirror the exact same pattern. */
28310 for (i = 0; i < 4; ++i)
28311 if (ipar[i] + 4 != ipar[i + 4])
return 0;
nelt = 4;
/* FALLTHRU */

case V2DFmode:
case V4SFmode:
28318 /* In the 128-bit case, we've full freedom in the placement of
28319 the elements from the source operand. */
28320 for (i = 0; i < nelt; ++i)
28321 mask |= ipar[i] << (i * (nelt / 2));
break;

default:
28325 gcc_unreachable ();
}

28328 /* Make sure success has a non-zero value by adding one. */
return mask + 1;
}
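/* Worked example (sketch, not compiled here): rebuilding the V4SF imm8
   the way the 128-bit branch above does, two bits per element
   (nelt / 2 == 2).  A parallel selecting {2, 3, 0, 1} packs to 0x4e, so
   the function above would return 0x4e + 1.  */
#if 0
unsigned int
vpermilps4_mask (const unsigned char sel[4])
{
  unsigned int mask = 0, i;
  for (i = 0; i < 4; i++)
    mask |= (unsigned int) sel[i] << (i * 2);
  return mask;
}
#endif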
28332 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
28333 the expansion functions to turn the parallel back into a mask.
28334 The return value is 0 for no match and the imm8+1 for a match. */
int
28337 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
{
28339 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
unsigned mask = 0;
28341 unsigned char ipar[8];

28343 if (XVECLEN (par, 0) != (int) nelt)
return 0;
28346 /* Validate that all of the elements are constants, and not totally
28347 out of range. Copy the data into an integral array to make the
28348 subsequent checks easier. */
28349 for (i = 0; i < nelt; ++i)
28351 rtx er = XVECEXP (par, 0, i);
28352 unsigned HOST_WIDE_INT ei;
28354 if (!CONST_INT_P (er))
return 0;
ei = INTVAL (er);
28357 if (ei >= 2 * nelt)
return 0;
ipar[i] = ei;
}
28362 /* Validate that the halves of the permute are halves. */
28363 for (i = 0; i < nelt2 - 1; ++i)
28364 if (ipar[i] + 1 != ipar[i + 1])
return 0;
28366 for (i = nelt2; i < nelt - 1; ++i)
28367 if (ipar[i] + 1 != ipar[i + 1])
return 0;
28370 /* Reconstruct the mask. */
28371 for (i = 0; i < 2; ++i)
28373 unsigned e = ipar[i * nelt2];
if (e % nelt2)
return 0;
e /= nelt2;
28377 mask |= e << (i * 4);
}

28380 /* Make sure success has a non-zero value by adding one. */
return mask + 1;
}
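/* Worked example (sketch, not compiled here), assuming the body above
   divides each half's first index by nelt2: for V4DF, a parallel
   {0, 1, 4, 5} selects lane 0 of each operand and packs to imm8 0x20,
   so the function above would return 0x20 + 1.  */
#if 0
unsigned int
vperm2f128_mask_v4df (const unsigned char sel[4])
{
  unsigned int mask = 0, i;
  for (i = 0; i < 2; i++)
    mask |= (sel[i * 2] / 2) << (i * 4);	/* nelt2 == 2 */
  return mask;
}
#endif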
28385 /* Store OPERAND to the memory after reload is completed. This means
28386 that we can't easily use assign_stack_local. */
rtx
28388 ix86_force_to_memory (enum machine_mode mode, rtx operand)
{
rtx result;
28392 gcc_assert (reload_completed);
28393 if (ix86_using_red_zone ())
{
28395 result = gen_rtx_MEM (mode,
28396 gen_rtx_PLUS (Pmode,
stack_pointer_rtx,
28398 GEN_INT (-RED_ZONE_SIZE)));
28399 emit_move_insn (result, operand);
}
28401 else if (TARGET_64BIT)
{
switch (mode)
{
case HImode:
case SImode:
28407 operand = gen_lowpart (DImode, operand);
/* FALLTHRU */
case DImode:
emit_insn (
28411 gen_rtx_SET (VOIDmode,
28412 gen_rtx_MEM (DImode,
28413 gen_rtx_PRE_DEC (DImode,
28414 stack_pointer_rtx)),
operand));
break;
default:
28418 gcc_unreachable ();
}
28420 result = gen_rtx_MEM (mode, stack_pointer_rtx);
}
else
{
switch (mode)
{
case DImode:
{
rtx operands[2];
28429 split_double_mode (mode, &operand, 1, operands, operands + 1);
emit_insn (
28431 gen_rtx_SET (VOIDmode,
28432 gen_rtx_MEM (SImode,
28433 gen_rtx_PRE_DEC (Pmode,
28434 stack_pointer_rtx)),
operands[1]));
emit_insn (
28437 gen_rtx_SET (VOIDmode,
28438 gen_rtx_MEM (SImode,
28439 gen_rtx_PRE_DEC (Pmode,
28440 stack_pointer_rtx)),
operands[0]));
}
break;
case HImode:
28445 /* Store HImodes as SImodes. */
28446 operand = gen_lowpart (SImode, operand);
/* FALLTHRU */
case SImode:
emit_insn (
28450 gen_rtx_SET (VOIDmode,
28451 gen_rtx_MEM (GET_MODE (operand),
28452 gen_rtx_PRE_DEC (SImode,
28453 stack_pointer_rtx)),
operand));
break;
default:
28457 gcc_unreachable ();
}
28459 result = gen_rtx_MEM (mode, stack_pointer_rtx);
}
return result;
}
28464 /* Free operand from the memory. */
void
28466 ix86_free_from_memory (enum machine_mode mode)
{
28468 if (!ix86_using_red_zone ())
{
int size;

28472 if (mode == DImode || TARGET_64BIT)
size = 8;
else
size = 4;
28476 /* Use LEA to deallocate stack space. In peephole2 it will be converted
28477 to pop or add instruction if registers are available. */
28478 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
28479 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
GEN_INT (size))));
}
}
28484 /* Implement TARGET_PREFERRED_RELOAD_CLASS.
28486 Put float CONST_DOUBLE in the constant pool instead of fp regs.
28487 QImode must go into class Q_REGS.
28488 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
28489 movdf to do mem-to-mem moves through integer regs. */
static reg_class_t
28492 ix86_preferred_reload_class (rtx x, reg_class_t regclass)
{
28494 enum machine_mode mode = GET_MODE (x);

28496 /* We're only allowed to return a subclass of CLASS. Many of the
28497 following checks fail for NO_REGS, so eliminate that early. */
28498 if (regclass == NO_REGS)
return NO_REGS;

28501 /* All classes can load zeros. */
28502 if (x == CONST0_RTX (mode))
return regclass;
28505 /* Force constants into memory if we are loading a (nonzero) constant into
28506 an MMX or SSE register. This is because there are no MMX/SSE instructions
28507 to load from a constant. */
if (CONSTANT_P (x)
28509 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
return NO_REGS;
28512 /* Prefer SSE regs only, if we can use them for math. */
28513 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
28514 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
28516 /* Floating-point constants need more complex checks. */
28517 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
{
28519 /* General regs can load everything. */
28520 if (reg_class_subset_p (regclass, GENERAL_REGS))
return regclass;

28523 /* Floats can load 0 and 1 plus some others. Note that we eliminated
28524 zero above. We only want to wind up preferring 80387 registers if
28525 we plan on doing computation with them. */
if (MAYBE_FLOAT_CLASS_P (regclass)
28527 && standard_80387_constant_p (x))
{
28529 /* Limit class to non-sse. */
28530 if (regclass == FLOAT_SSE_REGS)
return FLOAT_REGS;
28532 if (regclass == FP_TOP_SSE_REGS)
return FP_TOP_REG;
28534 if (regclass == FP_SECOND_SSE_REGS)
28535 return FP_SECOND_REG;
28536 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
return regclass;
}

return NO_REGS;
}
28543 /* Generally when we see PLUS here, it's the function invariant
28544 (plus soft-fp const_int).  Which can only be computed into general
regs. */
28546 if (GET_CODE (x) == PLUS)
28547 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
28549 /* QImode constants are easy to load, but non-constant QImode data
28550 must go into Q_REGS. */
28551 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
{
28553 if (reg_class_subset_p (regclass, Q_REGS))
return regclass;
28555 if (reg_class_subset_p (Q_REGS, regclass))
return Q_REGS;
return NO_REGS;
}

return regclass;
}
28563 /* Discourage putting floating-point values in SSE registers unless
28564 SSE math is being used, and likewise for the 387 registers. */
static reg_class_t
28566 ix86_preferred_output_reload_class (rtx x, reg_class_t regclass)
{
28568 enum machine_mode mode = GET_MODE (x);
28570 /* Restrict the output reload class to the register bank that we are doing
28571 math on. If we would like not to return a subset of CLASS, reject this
28572 alternative: if reload cannot do this, it will still use its choice. */
28574 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
28575 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
28577 if (X87_FLOAT_MODE_P (mode))
{
28579 if (regclass == FP_TOP_SSE_REGS)
return FP_TOP_REG;
28581 else if (regclass == FP_SECOND_SSE_REGS)
28582 return FP_SECOND_REG;
else
28584 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
}

return regclass;
}
static reg_class_t
28591 ix86_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
28592 enum machine_mode mode,
28593 secondary_reload_info *sri ATTRIBUTE_UNUSED)
{
28595 /* QImode spills from non-QI registers require
28596 intermediate register on 32bit targets. */
if (!TARGET_64BIT
28598 && !in_p && mode == QImode
28599 && (rclass == GENERAL_REGS
28600 || rclass == LEGACY_REGS
28601 || rclass == INDEX_REGS))
{
int regno;

if (REG_P (x))
regno = REGNO (x);
else
regno = -1;

28610 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
28611 regno = true_regnum (x);

28613 /* Return Q_REGS if the operand is in memory. */
if (regno == -1)
return Q_REGS;
}
28618 /* This condition handles corner case where an expression involving
28619 pointers gets vectorized. We're trying to use the address of a
28620 stack slot as a vector initializer.
28622 (set (reg:V2DI 74 [ vect_cst_.2 ])
28623 (vec_duplicate:V2DI (reg/f:DI 20 frame)))
28625 Eventually frame gets turned into sp+offset like this:
28627 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28628 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
28629 (const_int 392 [0x188]))))
28631 That later gets turned into:
28633 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28634 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
28635 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))))
28637 We'll have the following reload recorded:
28639 Reload 0: reload_in (DI) =
28640 (plus:DI (reg/f:DI 7 sp)
28641 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))
28642 reload_out (V2DI) = (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28643 SSE_REGS, RELOAD_OTHER (opnum = 0), can't combine
28644 reload_in_reg: (plus:DI (reg/f:DI 7 sp) (const_int 392 [0x188]))
28645 reload_out_reg: (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28646 reload_reg_rtx: (reg:V2DI 22 xmm1)
28648 Which isn't going to work since SSE instructions can't handle scalar
28649 additions. Returning GENERAL_REGS forces the addition into integer
28650 register and reload can handle subsequent reloads without problems. */
28652 if (in_p && GET_CODE (x) == PLUS
28653 && SSE_CLASS_P (rclass)
28654 && SCALAR_INT_MODE_P (mode))
28655 return GENERAL_REGS;

return NO_REGS;
}
28660 /* Implement TARGET_CLASS_LIKELY_SPILLED_P. */
static bool
28663 ix86_class_likely_spilled_p (reg_class_t rclass)
{
switch (rclass)
{
case AREG:
case DREG:
case CREG:
case BREG:
case AD_REGS:
case SIREG:
case DIREG:
28674 case SSE_FIRST_REG:
case FP_TOP_REG:
28676 case FP_SECOND_REG:
return true;

default:
break;
}

return false;
}
28686 /* If we are copying between general and FP registers, we need a memory
28687 location. The same is true for SSE and MMX registers.
28689 To optimize register_move_cost performance, allow inline variant.
28691 The macro can't work reliably when one of the CLASSES is class containing
28692 registers from multiple units (SSE, MMX, integer). We avoid this by never
28693 combining those units in single alternative in the machine description.
28694 Ensure that this constraint holds to avoid unexpected surprises.
28696 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
28697 enforce these sanity checks. */
static inline bool
28700 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
28701 enum machine_mode mode, int strict)
{
28703 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
28704 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
28705 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
28706 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
28707 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
28708 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
{
28710 gcc_assert (!strict);
return true;
}

28714 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
return true;
28717 /* ??? This is a lie. We do have moves between mmx/general, and for
28718 mmx/sse2. But by saying we need secondary memory we discourage the
28719 register allocator from using the mmx registers unless needed. */
28720 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
return true;
28723 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
{
28725 /* SSE1 doesn't have any direct moves from other classes. */
if (!TARGET_SSE2)
return true;

28729 /* If the target says that inter-unit moves are more expensive
28730 than moving through memory, then don't generate them. */
28731 if (!TARGET_INTER_UNIT_MOVES)
return true;

28734 /* Between SSE and general, we have moves no larger than word size. */
28735 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
return true;
}

return false;
}

bool
28743 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
28744 enum machine_mode mode, int strict)
{
28746 return inline_secondary_memory_needed (class1, class2, mode, strict);
}
28749 /* Return true if the registers in CLASS cannot represent the change from
28750 modes FROM to TO. */
static bool
28753 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
28754 enum reg_class regclass)
{
if (from == to)
return false;
28759 /* x87 registers can't do subreg at all, as all values are reformatted
28760 to extended precision. */
28761 if (MAYBE_FLOAT_CLASS_P (regclass))
return true;

28764 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
{
28766 /* Vector registers do not support QI or HImode loads. If we don't
28767 disallow a change to these modes, reload will assume it's ok to
28768 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
28769 the vec_dupv4hi pattern. */
28770 if (GET_MODE_SIZE (from) < 4)
return true;
28773 /* Vector registers do not support subreg with nonzero offsets, which
28774 are otherwise valid for integer registers. Since we can't see
28775 whether we have a nonzero offset from here, prohibit all
28776 nonparadoxical subregs changing size. */
28777 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
return true;
}

return false;
}
28784 /* Return the cost of moving data of mode M between a
28785 register and memory. A value of 2 is the default; this cost is
28786 relative to those in `REGISTER_MOVE_COST'.
28788 This function is used extensively by register_move_cost that is used to
28789 build tables at startup. Make it inline in this case.
28790 When IN is 2, return maximum of in and out move cost.
28792 If moving between registers and memory is more expensive than
28793 between two registers, you should define this macro to express the
28796 Model also increased moving costs of QImode registers in non
Q_REGS classes. */

static inline int
28800 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
int in)
{
int cost;

28804 if (FLOAT_CLASS_P (regclass))
{
int index;
switch (mode)
{
case SFmode:
index = 0;
break;
case DFmode:
index = 1;
break;
case XFmode:
index = 2;
break;
default:
return 100;
}
if (in == 2)
28822 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
28823 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
}

28825 if (SSE_CLASS_P (regclass))
{
int index;
28828 switch (GET_MODE_SIZE (mode))
{
case 4:
index = 0;
break;
case 8:
index = 1;
break;
case 16:
index = 2;
break;
default:
return 100;
}
if (in == 2)
28843 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
28844 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
}

28846 if (MMX_CLASS_P (regclass))
{
int index;
28849 switch (GET_MODE_SIZE (mode))
{
case 4:
index = 0;
break;
case 8:
index = 1;
break;
default:
return 100;
}
if (in == 2)
28861 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
28862 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
}

28864 switch (GET_MODE_SIZE (mode))
{
case 1:
28867 if (Q_CLASS_P (regclass) || TARGET_64BIT)
{
if (!in)
28870 return ix86_cost->int_store[0];
28871 if (TARGET_PARTIAL_REG_DEPENDENCY
28872 && optimize_function_for_speed_p (cfun))
28873 cost = ix86_cost->movzbl_load;
else
28875 cost = ix86_cost->int_load[0];
if (in == 2)
28877 return MAX (cost, ix86_cost->int_store[0]);
return cost;
}
else
{
if (in == 2)
28883 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
if (in)
28885 return ix86_cost->movzbl_load;
else
28887 return ix86_cost->int_store[0] + 4;
}
break;
case 2:
if (in == 2)
28892 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
28893 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
default:
28895 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
28896 if (mode == TFmode)
mode = XFmode;
if (in == 2)
28899 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
else if (in)
28901 cost = ix86_cost->int_load[2];
else
28903 cost = ix86_cost->int_store[2];
28904 return (cost * (((int) GET_MODE_SIZE (mode)
28905 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
}
}
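/* Worked example (sketch, not compiled here): the scaling above is a
   plain ceiling division.  With UNITS_PER_WORD == 4, a 12-byte XFmode
   value costs three word moves, so a per-word cost of 4 yields 12; the
   numbers are only illustrative.  */
#if 0
static unsigned int
multi_word_cost (unsigned int per_word, unsigned int mode_size,
                 unsigned int units_per_word)
{
  return per_word * ((mode_size + units_per_word - 1) / units_per_word);
}
#endif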
static int
28910 ix86_memory_move_cost (enum machine_mode mode, reg_class_t regclass,
bool in)
{
28913 return inline_memory_move_cost (mode, (enum reg_class) regclass, in ? 1 : 0);
}
28917 /* Return the cost of moving data from a register in class CLASS1 to
28918 one in class CLASS2.
28920 It is not required that the cost always equal 2 when FROM is the same as TO;
28921 on some machines it is expensive to move between registers if they are not
28922 general registers. */
static int
28925 ix86_register_move_cost (enum machine_mode mode, reg_class_t class1_i,
28926 reg_class_t class2_i)
{
28928 enum reg_class class1 = (enum reg_class) class1_i;
28929 enum reg_class class2 = (enum reg_class) class2_i;
28931 /* In case we require secondary memory, compute cost of the store followed
28932 by load. In order to avoid bad register allocation choices, we need
28933 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
28935 if (inline_secondary_memory_needed (class1, class2, mode, 0))
{
int cost = 1;

28939 cost += inline_memory_move_cost (mode, class1, 2);
28940 cost += inline_memory_move_cost (mode, class2, 2);
28942 /* In case of copying from general_purpose_register we may emit multiple
28943 stores followed by single load causing memory size mismatch stall.
28944 Count this as arbitrarily high cost of 20. */
28945 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
cost += 20;

return cost;
}
28948 /* In the case of FP/MMX moves, the registers actually overlap, and we
28949 have to switch modes in order to treat them differently. */
28950 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
28951 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
return 20;
28957 /* Moves between SSE/MMX and integer unit are expensive. */
28958 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
28959 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
28961 /* ??? By keeping returned value relatively high, we limit the number
28962 of moves between integer and MMX/SSE registers for all targets.
28963 Additionally, high value prevents problem with x86_modes_tieable_p(),
28964 where integer modes in MMX/SSE registers are not tieable
28965 because of missing QImode and HImode moves to, from or between
28966 MMX/SSE registers. */
28967 return MAX (8, ix86_cost->mmxsse_to_integer);
28969 if (MAYBE_FLOAT_CLASS_P (class1))
28970 return ix86_cost->fp_move;
28971 if (MAYBE_SSE_CLASS_P (class1))
28972 return ix86_cost->sse_move;
28973 if (MAYBE_MMX_CLASS_P (class1))
28974 return ix86_cost->mmx_move;
return 2;
}
28978 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
int
28981 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
28983 /* Flags and only flags can only hold CCmode values. */
28984 if (CC_REGNO_P (regno))
28985 return GET_MODE_CLASS (mode) == MODE_CC;
28986 if (GET_MODE_CLASS (mode) == MODE_CC
28987 || GET_MODE_CLASS (mode) == MODE_RANDOM
28988 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
return 0;
28990 if (FP_REGNO_P (regno))
28991 return VALID_FP_MODE_P (mode);
28992 if (SSE_REGNO_P (regno))
{
28994 /* We implement the move patterns for all vector modes into and
28995 out of SSE registers, even when no operation instructions
28996 are available.  OImode move is available only when AVX is
enabled. */
28998 return ((TARGET_AVX && mode == OImode)
28999 || VALID_AVX256_REG_MODE (mode)
29000 || VALID_SSE_REG_MODE (mode)
29001 || VALID_SSE2_REG_MODE (mode)
29002 || VALID_MMX_REG_MODE (mode)
29003 || VALID_MMX_REG_MODE_3DNOW (mode));
}
29005 if (MMX_REGNO_P (regno))
{
29007 /* We implement the move patterns for 3DNOW modes even in MMX mode,
29008 so if the register is available at all, then we can move data of
29009 the given mode into or out of it. */
29010 return (VALID_MMX_REG_MODE (mode)
29011 || VALID_MMX_REG_MODE_3DNOW (mode));
}

29014 if (mode == QImode)
{
29016 /* Take care for QImode values - they can be in non-QI regs,
29017 but then they do cause partial register stalls. */
29018 if (regno <= BX_REG || TARGET_64BIT)
return 1;
29020 if (!TARGET_PARTIAL_REG_STALL)
return 1;
29022 return reload_in_progress || reload_completed;
}
29024 /* We handle both integer and floats in the general purpose registers. */
29025 else if (VALID_INT_MODE_P (mode))
return 1;
29027 else if (VALID_FP_MODE_P (mode))
return 1;
29029 else if (VALID_DFP_MODE_P (mode))
return 1;
29031 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
29032 on to use that value in smaller contexts, this can easily force a
29033 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
29034 supporting DImode, allow it. */
29035 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
return 1;
return 0;
}
29041 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
29042 tieable integer mode. */
static bool
29045 ix86_tieable_integer_mode_p (enum machine_mode mode)
{
switch (mode)
{
case HImode:
case SImode:
return true;

case QImode:
29054 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;

case DImode:
29057 return TARGET_64BIT;

default:
return false;
}
}
29064 /* Return true if MODE1 is accessible in a register that can hold MODE2
29065 without copying. That is, all register classes that can hold MODE2
29066 can also hold MODE1. */
static bool
29069 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
29071 if (mode1 == mode2)
return true;

29074 if (ix86_tieable_integer_mode_p (mode1)
29075 && ix86_tieable_integer_mode_p (mode2))
return true;
29078 /* MODE2 being XFmode implies fp stack or general regs, which means we
29079 can tie any smaller floating point modes to it. Note that we do not
29080 tie this with TFmode. */
29081 if (mode2 == XFmode)
29082 return mode1 == SFmode || mode1 == DFmode;
29084 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
29085 that we can tie it with SFmode. */
29086 if (mode2 == DFmode)
29087 return mode1 == SFmode;
29089 /* If MODE2 is only appropriate for an SSE register, then tie with
29090 any other mode acceptable to SSE registers. */
29091 if (GET_MODE_SIZE (mode2) == 16
29092 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
29093 return (GET_MODE_SIZE (mode1) == 16
29094 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
29096 /* If MODE2 is appropriate for an MMX register, then tie
29097 with any other mode acceptable to MMX registers. */
29098 if (GET_MODE_SIZE (mode2) == 8
29099 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
29100 return (GET_MODE_SIZE (mode1) == 8
29101 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));

return false;
}
29106 /* Compute a (partial) cost for rtx X. Return true if the complete
29107 cost has been computed, and false if subexpressions should be
29108 scanned. In either case, *TOTAL contains the cost result. */
static bool
29111 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
{
29113 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
29114 enum machine_mode mode = GET_MODE (x);
29115 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;

switch (code)
{
case CONST_INT:
case CONST:
case LABEL_REF:
case SYMBOL_REF:
29123 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
*total = 3;
29125 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
*total = 2;
29127 else if (flag_pic && SYMBOLIC_CONST (x)
&& (!TARGET_64BIT
29129 || (GET_CODE (x) != LABEL_REF
29130 && (GET_CODE (x) != SYMBOL_REF
29131 || !SYMBOL_REF_LOCAL_P (x)))))
*total = 1;
else
*total = 0;
return true;
case CONST_DOUBLE:
29138 if (mode == VOIDmode)
*total = 0;
else
29141 switch (standard_80387_constant_p (x))
{
case 1: /* 0.0 */
*total = 1;
break;
29146 default: /* Other constants */
*total = 2;
break;
case 0:
case -1:
29151 /* Start with (MEM (SYMBOL_REF)), since that's where
29152 it'll probably end up. Add a penalty for size. */
29153 *total = (COSTS_N_INSNS (1)
29154 + (flag_pic != 0 && !TARGET_64BIT)
29155 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
break;
}
return true;
case ZERO_EXTEND:
29161 /* The zero extension is often completely free on x86_64, so make
29162 it as cheap as possible. */
29163 if (TARGET_64BIT && mode == DImode
29164 && GET_MODE (XEXP (x, 0)) == SImode)
*total = 1;
29166 else if (TARGET_ZERO_EXTEND_WITH_AND)
29167 *total = cost->add;
else
29169 *total = cost->movzx;
break;

case SIGN_EXTEND:
29173 *total = cost->movsx;
break;

case ASHIFT:
29177 if (CONST_INT_P (XEXP (x, 1))
29178 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
{
29180 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
if (value == 1)
{
29183 *total = cost->add;
return false;
}
29186 if ((value == 2 || value == 3)
29187 && cost->lea <= cost->shift_const)
{
29189 *total = cost->lea;
return false;
}
}
/* FALLTHRU */

case ROTATE:
case ASHIFTRT:
case LSHIFTRT:
case ROTATERT:
29199 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
{
29201 if (CONST_INT_P (XEXP (x, 1)))
{
29203 if (INTVAL (XEXP (x, 1)) > 32)
29204 *total = cost->shift_const + COSTS_N_INSNS (2);
else
29206 *total = cost->shift_const * 2;
}
else
{
29210 if (GET_CODE (XEXP (x, 1)) == AND)
29211 *total = cost->shift_var * 2;
else
29213 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
}
}
else
{
29218 if (CONST_INT_P (XEXP (x, 1)))
29219 *total = cost->shift_const;
else
29221 *total = cost->shift_var;
}
break;
case FMA:
{
rtx sub;

29229 gcc_assert (FLOAT_MODE_P (mode));
29230 gcc_assert (TARGET_FMA || TARGET_FMA4);

29232 /* ??? SSE scalar/vector cost should be used here. */
29233 /* ??? Bald assumption that fma has the same cost as fmul. */
29234 *total = cost->fmul;
29235 *total += rtx_cost (XEXP (x, 1), FMA, speed);

29237 /* Negate in op0 or op2 is free: FMS, FNMA, FNMS. */
sub = XEXP (x, 0);
29239 if (GET_CODE (sub) == NEG)
sub = XEXP (sub, 0);
29241 *total += rtx_cost (sub, FMA, speed);

sub = XEXP (x, 2);
29244 if (GET_CODE (sub) == NEG)
sub = XEXP (sub, 0);
29246 *total += rtx_cost (sub, FMA, speed);
break;
}

case MULT:
29251 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
{
29253 /* ??? SSE scalar cost should be used here. */
29254 *total = cost->fmul;
return false;
}
29257 else if (X87_FLOAT_MODE_P (mode))
{
29259 *total = cost->fmul;
return false;
}
29262 else if (FLOAT_MODE_P (mode))
{
29264 /* ??? SSE vector cost should be used here. */
29265 *total = cost->fmul;
return false;
}
else
{
29270 rtx op0 = XEXP (x, 0);
29271 rtx op1 = XEXP (x, 1);
int nbits;
29273 if (CONST_INT_P (XEXP (x, 1)))
{
29275 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
29276 for (nbits = 0; value != 0; value &= value - 1)
nbits++;
}
else
29280 /* This is arbitrary. */
nbits = 7;
29283 /* Compute costs correctly for widening multiplication. */
29284 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
29285 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
29286 == GET_MODE_SIZE (mode))
{
29288 int is_mulwiden = 0;
29289 enum machine_mode inner_mode = GET_MODE (op0);
29291 if (GET_CODE (op0) == GET_CODE (op1))
29292 is_mulwiden = 1, op1 = XEXP (op1, 0);
29293 else if (CONST_INT_P (op1))
{
29295 if (GET_CODE (op0) == SIGN_EXTEND)
29296 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
== INTVAL (op1);
else
29299 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
}

if (is_mulwiden)
29303 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
}

29306 *total = (cost->mult_init[MODE_INDEX (mode)]
29307 + nbits * cost->mult_bit
29308 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
return true;
}

case DIV:
case UDIV:
case MOD:
case UMOD:
29317 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29318 /* ??? SSE cost should be used here. */
29319 *total = cost->fdiv;
29320 else if (X87_FLOAT_MODE_P (mode))
29321 *total = cost->fdiv;
29322 else if (FLOAT_MODE_P (mode))
29323 /* ??? SSE vector cost should be used here. */
29324 *total = cost->fdiv;
else
29326 *total = cost->divide[MODE_INDEX (mode)];
return false;

case PLUS:
29330 if (GET_MODE_CLASS (mode) == MODE_INT
29331 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
{
29333 if (GET_CODE (XEXP (x, 0)) == PLUS
29334 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
29335 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
29336 && CONSTANT_P (XEXP (x, 1)))
{
29338 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
29339 if (val == 2 || val == 4 || val == 8)
{
29341 *total = cost->lea;
29342 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
29343 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
29344 outer_code, speed);
29345 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
return true;
}
}
29349 else if (GET_CODE (XEXP (x, 0)) == MULT
29350 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
{
29352 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
29353 if (val == 2 || val == 4 || val == 8)
{
29355 *total = cost->lea;
29356 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
29357 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
return true;
}
}
29361 else if (GET_CODE (XEXP (x, 0)) == PLUS)
{
29363 *total = cost->lea;
29364 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
29365 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
29366 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
return true;
}
}
/* FALLTHRU */

case MINUS:
29373 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
{
29375 /* ??? SSE cost should be used here. */
29376 *total = cost->fadd;
return false;
}
29379 else if (X87_FLOAT_MODE_P (mode))
{
29381 *total = cost->fadd;
return false;
}
29384 else if (FLOAT_MODE_P (mode))
{
29386 /* ??? SSE vector cost should be used here. */
29387 *total = cost->fadd;
return false;
}
/* FALLTHRU */

case AND:
case IOR:
case XOR:
29395 if (!TARGET_64BIT && mode == DImode)
{
29397 *total = (cost->add * 2
29398 + (rtx_cost (XEXP (x, 0), outer_code, speed)
29399 << (GET_MODE (XEXP (x, 0)) != DImode))
29400 + (rtx_cost (XEXP (x, 1), outer_code, speed)
29401 << (GET_MODE (XEXP (x, 1)) != DImode)));
return true;
}
/* FALLTHRU */

case NEG:
29407 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
{
29409 /* ??? SSE cost should be used here. */
29410 *total = cost->fchs;
return false;
}
29413 else if (X87_FLOAT_MODE_P (mode))
{
29415 *total = cost->fchs;
return false;
}
29418 else if (FLOAT_MODE_P (mode))
{
29420 /* ??? SSE vector cost should be used here. */
29421 *total = cost->fchs;
return false;
}
/* FALLTHRU */

case NOT:
29427 if (!TARGET_64BIT && mode == DImode)
29428 *total = cost->add * 2;
else
29430 *total = cost->add;
return false;

case COMPARE:
29434 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
29435 && XEXP (XEXP (x, 0), 1) == const1_rtx
29436 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
29437 && XEXP (x, 1) == const0_rtx)
{
29439 /* This kind of construct is implemented using test[bwl].
29440 Treat it as if we had an AND. */
29441 *total = (cost->add
29442 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
29443 + rtx_cost (const1_rtx, outer_code, speed));
return true;
}
return false;
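/* Illustrative sketch (not compiled here): the source shape that
   becomes the ZERO_EXTRACT comparison costed above; a single-bit test
   compiles to test[bwl] (or bt), roughly one ALU add.  */
#if 0
int
bit5_set (unsigned int x)
{
  return (x & (1u << 5)) != 0;	/* testl $32, %reg */
}
#endif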
case FLOAT_EXTEND:
29449 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
*total = 0;
return false;

case ABS:
29454 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29455 /* ??? SSE cost should be used here. */
29456 *total = cost->fabs;
29457 else if (X87_FLOAT_MODE_P (mode))
29458 *total = cost->fabs;
29459 else if (FLOAT_MODE_P (mode))
29460 /* ??? SSE vector cost should be used here. */
29461 *total = cost->fabs;
return false;

case SQRT:
29465 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29466 /* ??? SSE cost should be used here. */
29467 *total = cost->fsqrt;
29468 else if (X87_FLOAT_MODE_P (mode))
29469 *total = cost->fsqrt;
29470 else if (FLOAT_MODE_P (mode))
29471 /* ??? SSE vector cost should be used here. */
29472 *total = cost->fsqrt;
return false;

case UNSPEC:
29476 if (XINT (x, 1) == UNSPEC_TP)
*total = 0;
return false;

case VEC_SELECT:
case VEC_CONCAT:
case VEC_MERGE:
29483 case VEC_DUPLICATE:
29484 /* ??? Assume all of these vector manipulation patterns are
29485 recognizable. In which case they all pretty much have the
same cost. */
29487 *total = COSTS_N_INSNS (1);
return true;

default:
return false;
}
}

#if TARGET_MACHO
29497 static int current_machopic_label_num;
29499 /* Given a symbol name and its associated stub, write out the
29500 definition of the stub. */
void
29503 machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
29505 unsigned int length;
29506 char *binder_name, *symbol_name, lazy_ptr_name[32];
29507 int label = ++current_machopic_label_num;
29509 /* For 64-bit we shouldn't get here. */
29510 gcc_assert (!TARGET_64BIT);
29512 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
29513 symb = targetm.strip_name_encoding (symb);
29515 length = strlen (stub);
29516 binder_name = XALLOCAVEC (char, length + 32);
29517 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
29519 length = strlen (symb);
29520 symbol_name = XALLOCAVEC (char, length + 32);
29521 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
29523 sprintf (lazy_ptr_name, "L%d$lz", label);
29525 if (MACHOPIC_ATT_STUB)
29526 switch_to_section (darwin_sections[machopic_picsymbol_stub3_section]);
29527 else if (MACHOPIC_PURE)
29529 if (TARGET_DEEP_BRANCH_PREDICTION)
29530 switch_to_section (darwin_sections[machopic_picsymbol_stub2_section]);
29532 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
29535 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
29537 fprintf (file, "%s:\n", stub);
29538 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29540 if (MACHOPIC_ATT_STUB)
{
29542 fprintf (file, "\thlt ; hlt ; hlt ; hlt ; hlt\n");
}
29544 else if (MACHOPIC_PURE)
{
29547 if (TARGET_DEEP_BRANCH_PREDICTION)
{
29549 /* 25-byte PIC stub using "CALL get_pc_thunk". */
29550 rtx tmp = gen_rtx_REG (SImode, 2 /* ECX */);
29551 output_set_got (tmp, NULL_RTX); /* "CALL ___<cpu>.get_pc_thunk.cx". */
29552 fprintf (file, "LPC$%d:\tmovl\t%s-LPC$%d(%%ecx),%%ecx\n", label, lazy_ptr_name, label);
}
else
{
29556 /* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %eax". */
29557 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%ecx\n", label, label);
29558 fprintf (file, "\tmovl %s-LPC$%d(%%ecx),%%ecx\n", lazy_ptr_name, label);
}
29560 fprintf (file, "\tjmp\t*%%ecx\n");
}
else
29563 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
29565 /* The AT&T-style ("self-modifying") stub is not lazily bound, thus
29566 it needs no stub-binding-helper. */
29567 if (MACHOPIC_ATT_STUB)
return;

29570 fprintf (file, "%s:\n", binder_name);

if (MACHOPIC_PURE)
{
29574 fprintf (file, "\tlea\t%s-%s(%%ecx),%%ecx\n", lazy_ptr_name, binder_name);
29575 fprintf (file, "\tpushl\t%%ecx\n");
}
else
29578 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
29580 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
29582 /* N.B. Keep the correspondence of these
29583 'symbol_ptr/symbol_ptr2/symbol_ptr3' sections consistent with the
29584 old-pic/new-pic/non-pic stubs; altering this will break
29585 compatibility with existing dylibs. */
29589 if (TARGET_DEEP_BRANCH_PREDICTION)
29590 /* 25-byte PIC stub using "CALL get_pc_thunk". */
29591 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr2_section]);
29593 /* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %ecx". */
29594 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
29597 /* 16-byte -mdynamic-no-pic stub. */
29598 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr3_section]);
29600 fprintf (file, "%s:\n", lazy_ptr_name);
29601 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29602 fprintf (file, ASM_LONG "%s\n", binder_name);
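/* Putting the pieces together (an illustrative sketch, not verbatim
   output), a lazily-bound stub for a symbol _foo looks roughly like:

	L_foo$stub:
		.indirect_symbol _foo
		... code loading the lazy pointer L1$lz into %ecx ...
		jmp	*%ecx
	binder label:
		pushl	address of L1$lz
		jmp	dyld_stub_binding_helper
	L1$lz:
		.indirect_symbol _foo
		.long	binder label

   Until dyld binds _foo, the lazy pointer points back at the binder. */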
29604 #endif /* TARGET_MACHO */
29606 /* Order the registers for the register allocator. */
29609 x86_order_regs_for_local_alloc (void)
29614 /* First allocate the local general purpose registers. */
29615 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
29616 if (GENERAL_REGNO_P (i) && call_used_regs[i])
29617 reg_alloc_order [pos++] = i;
29619 /* Global general purpose registers. */
29620 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
29621 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
29622 reg_alloc_order [pos++] = i;
29624 /* x87 registers come first in case we are doing FP math using them. */
29626 if (!TARGET_SSE_MATH)
29627 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29628 reg_alloc_order [pos++] = i;
29630 /* SSE registers. */
29631 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
29632 reg_alloc_order [pos++] = i;
29633 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
29634 reg_alloc_order [pos++] = i;
29636 /* x87 registers. */
29637 if (TARGET_SSE_MATH)
29638 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29639 reg_alloc_order [pos++] = i;
29641 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
29642 reg_alloc_order [pos++] = i;
29644 /* Initialize the rest of array as we do not allocate some registers at all. */
29646 while (pos < FIRST_PSEUDO_REGISTER)
29647 reg_alloc_order [pos++] = 0;
29650 /* Handle a "callee_pop_aggregate_return" attribute; arguments as
29651 in struct attribute_spec handler. */
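/* Illustrative usage (not from this file): on 32-bit targets,

	struct S { int a[4]; };
	struct S __attribute__((callee_pop_aggregate_return (1))) f (void);

   where the 0/1 argument states whether the callee pops the hidden
   aggregate-return pointer; the handler below only validates this
   usage. */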
29653 ix86_handle_callee_pop_aggregate_return (tree *node, tree name,
29655 int flags ATTRIBUTE_UNUSED,
29656 bool *no_add_attrs)
29658 if (TREE_CODE (*node) != FUNCTION_TYPE
29659 && TREE_CODE (*node) != METHOD_TYPE
29660 && TREE_CODE (*node) != FIELD_DECL
29661 && TREE_CODE (*node) != TYPE_DECL)
29663 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29665 *no_add_attrs = true;
29670 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
29672 *no_add_attrs = true;
29675 if (is_attribute_p ("callee_pop_aggregate_return", name))
29679 cst = TREE_VALUE (args);
29680 if (TREE_CODE (cst) != INTEGER_CST)
29682 warning (OPT_Wattributes,
29683 "%qE attribute requires an integer constant argument",
29685 *no_add_attrs = true;
29687 else if (compare_tree_int (cst, 0) != 0
29688 && compare_tree_int (cst, 1) != 0)
29690 warning (OPT_Wattributes,
29691 "argument to %qE attribute is neither zero, nor one",
29693 *no_add_attrs = true;
29702 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
29703 struct attribute_spec.handler. */
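/* Illustrative usage (not from this file):

	int __attribute__((ms_abi))   f (int, int);
	int __attribute__((sysv_abi)) g (int, int);

   selects the 64-bit calling convention per function; the handler
   below rejects combining the two attributes. */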
29705 ix86_handle_abi_attribute (tree *node, tree name,
29706 tree args ATTRIBUTE_UNUSED,
29707 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29709 if (TREE_CODE (*node) != FUNCTION_TYPE
29710 && TREE_CODE (*node) != METHOD_TYPE
29711 && TREE_CODE (*node) != FIELD_DECL
29712 && TREE_CODE (*node) != TYPE_DECL)
29714 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29716 *no_add_attrs = true;
29721 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
29723 *no_add_attrs = true;
29727 /* The ms_abi and sysv_abi attributes are mutually exclusive. */
29728 if (is_attribute_p ("ms_abi", name))
29730 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
29732 error ("ms_abi and sysv_abi attributes are not compatible");
29737 else if (is_attribute_p ("sysv_abi", name))
29739 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
29741 error ("ms_abi and sysv_abi attributes are not compatible");
29750 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
29751 struct attribute_spec.handler. */
29753 ix86_handle_struct_attribute (tree *node, tree name,
29754 tree args ATTRIBUTE_UNUSED,
29755 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29758 if (DECL_P (*node))
29760 if (TREE_CODE (*node) == TYPE_DECL)
29761 type = &TREE_TYPE (*node);
29766 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
29767 || TREE_CODE (*type) == UNION_TYPE)))
29769 warning (OPT_Wattributes, "%qE attribute ignored",
29771 *no_add_attrs = true;
29774 else if ((is_attribute_p ("ms_struct", name)
29775 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
29776 || ((is_attribute_p ("gcc_struct", name)
29777 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
29779 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
29781 *no_add_attrs = true;
29788 ix86_handle_fndecl_attribute (tree *node, tree name,
29789 tree args ATTRIBUTE_UNUSED,
29790 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29792 if (TREE_CODE (*node) != FUNCTION_DECL)
29794 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29796 *no_add_attrs = true;
29802 ix86_ms_bitfield_layout_p (const_tree record_type)
29804 return ((TARGET_MS_BITFIELD_LAYOUT
29805 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
29806 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
29809 /* Returns an expression indicating where the this parameter is
29810 located on entry to the FUNCTION. */
29813 x86_this_parameter (tree function)
29815 tree type = TREE_TYPE (function);
29816 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
29821 const int *parm_regs;
29823 if (ix86_function_type_abi (type) == MS_ABI)
29824 parm_regs = x86_64_ms_abi_int_parameter_registers;
29826 parm_regs = x86_64_int_parameter_registers;
29827 return gen_rtx_REG (DImode, parm_regs[aggr]);
29830 nregs = ix86_function_regparm (type, function);
29832 if (nregs > 0 && !stdarg_p (type))
29835 unsigned int ccvt = ix86_get_callcvt (type);
29837 if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
29838 regno = aggr ? DX_REG : CX_REG;
29839 else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
29843 return gen_rtx_MEM (SImode,
29844 plus_constant (stack_pointer_rtx, 4));
29853 return gen_rtx_MEM (SImode,
29854 plus_constant (stack_pointer_rtx, 4));
29857 return gen_rtx_REG (SImode, regno);
29860 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
29863 /* Determine whether x86_output_mi_thunk can succeed. */
29866 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
29867 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
29868 HOST_WIDE_INT vcall_offset, const_tree function)
29870 /* 64-bit can handle anything. */
29874 /* For 32-bit, everything's fine if we have one free register. */
29875 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
29878 /* Need a free register for vcall_offset. */
29882 /* Need a free register for GOT references. */
29883 if (flag_pic && !targetm.binds_local_p (function))
29886 /* Otherwise ok. */
29890 /* Output the assembler code for a thunk function. THUNK_DECL is the
29891 declaration for the thunk function itself, FUNCTION is the decl for
29892 the target function. DELTA is an immediate constant offset to be
29893 added to THIS. If VCALL_OFFSET is nonzero, the word at
29894 *(*this + vcall_offset) should be added to THIS. */
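/* A rough sketch of the output, assuming 32-bit code, THIS passed on
   the stack and no vcall offset:

	addl	$delta, 4(%esp)
	jmp	target

   With a vcall offset, THIS is first pulled into a scratch register
   so the vtable slot can be read and added before the tail jump. */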
29897 x86_output_mi_thunk (FILE *file,
29898 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
29899 HOST_WIDE_INT vcall_offset, tree function)
29902 rtx this_param = x86_this_parameter (function);
29905 /* Make sure unwind info is emitted for the thunk if needed. */
29906 final_start_function (emit_barrier (), file, 1);
29908 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
29909 pull it in now and let DELTA benefit. */
29910 if (REG_P (this_param))
29911 this_reg = this_param;
29912 else if (vcall_offset)
29914 /* Put the this parameter into %eax. */
29915 xops[0] = this_param;
29916 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
29917 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29920 this_reg = NULL_RTX;
29922 /* Adjust the this parameter by a fixed constant. */
29925 xops[0] = GEN_INT (delta);
29926 xops[1] = this_reg ? this_reg : this_param;
29929 if (!x86_64_general_operand (xops[0], DImode))
29931 tmp = gen_rtx_REG (DImode, R10_REG);
29933 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
29935 xops[1] = this_param;
29937 if (x86_maybe_negate_const_int (&xops[0], DImode))
29938 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
29940 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
29942 else if (x86_maybe_negate_const_int (&xops[0], SImode))
29943 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
29945 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
29948 /* Adjust the this parameter by a value stored in the vtable. */
29952 tmp = gen_rtx_REG (DImode, R10_REG);
29955 int tmp_regno = CX_REG;
29956 unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (function));
29957 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) != 0)
29958 tmp_regno = AX_REG;
29959 tmp = gen_rtx_REG (SImode, tmp_regno);
29962 xops[0] = gen_rtx_MEM (Pmode, this_reg);
29964 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29966 /* Adjust the this parameter. */
29967 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
29968 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
29970 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
29971 xops[0] = GEN_INT (vcall_offset);
29973 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
29974 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
29976 xops[1] = this_reg;
29977 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
29980 /* If necessary, drop THIS back to its stack slot. */
29981 if (this_reg && this_reg != this_param)
29983 xops[0] = this_reg;
29984 xops[1] = this_param;
29985 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
29988 xops[0] = XEXP (DECL_RTL (function), 0);
29991 if (!flag_pic || targetm.binds_local_p (function)
29992 || DEFAULT_ABI == MS_ABI)
29993 output_asm_insn ("jmp\t%P0", xops);
29994 /* All thunks should be in the same object as their target,
29995 and thus binds_local_p should be true. */
29996 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
29997 gcc_unreachable ();
30000 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
30001 tmp = gen_rtx_CONST (Pmode, tmp);
30002 tmp = gen_rtx_MEM (QImode, tmp);
30004 output_asm_insn ("jmp\t%A0", xops);
30009 if (!flag_pic || targetm.binds_local_p (function))
30010 output_asm_insn ("jmp\t%P0", xops);
30015 rtx sym_ref = XEXP (DECL_RTL (function), 0);
30016 if (TARGET_MACHO_BRANCH_ISLANDS)
30017 sym_ref = (gen_rtx_SYMBOL_REF
30019 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
30020 tmp = gen_rtx_MEM (QImode, sym_ref);
30022 output_asm_insn ("jmp\t%0", xops);
30025 #endif /* TARGET_MACHO */
30027 tmp = gen_rtx_REG (SImode, CX_REG);
30028 output_set_got (tmp, NULL_RTX);
30031 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
30032 output_asm_insn ("jmp\t{*}%1", xops);
30035 final_end_function ();
30039 x86_file_start (void)
30041 default_file_start ();
30043 darwin_file_start ();
30045 if (X86_FILE_START_VERSION_DIRECTIVE)
30046 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
30047 if (X86_FILE_START_FLTUSED)
30048 fputs ("\t.global\t__fltused\n", asm_out_file);
30049 if (ix86_asm_dialect == ASM_INTEL)
30050 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
30054 x86_field_alignment (tree field, int computed)
30056 enum machine_mode mode;
30057 tree type = TREE_TYPE (field);
30059 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
30061 mode = TYPE_MODE (strip_array_types (type));
30062 if (mode == DFmode || mode == DCmode
30063 || GET_MODE_CLASS (mode) == MODE_INT
30064 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
30065 return MIN (32, computed);
30069 /* Output assembler code to FILE to increment profiler label # LABELNO
30070 for profiling a function entry. */
30072 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
30074 const char *mcount_name = (flag_fentry ? MCOUNT_NAME_BEFORE_PROLOGUE
30079 #ifndef NO_PROFILE_COUNTERS
30080 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
30083 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
30084 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", mcount_name);
30086 fprintf (file, "\tcall\t%s\n", mcount_name);
30090 #ifndef NO_PROFILE_COUNTERS
30091 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
30094 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", mcount_name);
30098 #ifndef NO_PROFILE_COUNTERS
30099 fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
30102 fprintf (file, "\tcall\t%s\n", mcount_name);
30106 /* We don't have exact information about the insn sizes, but we may assume
30107 quite safely that we are informed about all 1 byte insns and memory
30108 address sizes. This is enough to eliminate unnecessary padding in 99% of cases. */
30112 min_insn_size (rtx insn)
30116 if (!INSN_P (insn) || !active_insn_p (insn))
30119 /* Discard alignments we've emitted and jump table data. */
30120 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
30121 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
30123 if (JUMP_TABLE_DATA_P (insn))
30126 /* Important case - calls are always 5 bytes.
30127 It is common to have many calls in a row. */
30129 && symbolic_reference_mentioned_p (PATTERN (insn))
30130 && !SIBLING_CALL_P (insn))
30132 len = get_attr_length (insn);
30136 /* For normal instructions we rely on get_attr_length being exact,
30137 with a few exceptions. */
30138 if (!JUMP_P (insn))
30140 enum attr_type type = get_attr_type (insn);
30145 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
30146 || asm_noperands (PATTERN (insn)) >= 0)
30153 /* Otherwise trust get_attr_length. */
30157 l = get_attr_length_address (insn);
30158 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
30167 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
30169 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16 byte window. */
30173 ix86_avoid_jump_mispredicts (void)
30175 rtx insn, start = get_insns ();
30176 int nbytes = 0, njumps = 0;
30179 /* Look for all minimal intervals of instructions containing 4 jumps.
30180 The intervals are bounded by START and INSN. NBYTES is the total
30181 size of instructions in the interval including INSN and not including
30182 START. When NBYTES is smaller than 16 bytes, it is possible
30183 that the ends of START and INSN land in the same 16-byte page.
30185 The smallest offset at which INSN can start within the page occurs when
30186 START ends at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
30187 We add a p2align for the 16-byte window with max skip 15 - NBYTES + sizeof (INSN). */
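/* Worked example (illustrative numbers): with four jumps in an
   interval of NBYTES == 12 and a 2-byte INSN, the pad emitted is a
   p2align with max skip 15 - 12 + 2 == 5, just enough to guarantee
   the four jumps cannot share one 16-byte window. */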
30189 for (insn = start; insn; insn = NEXT_INSN (insn))
30193 if (LABEL_P (insn))
30195 int align = label_to_alignment (insn);
30196 int max_skip = label_to_max_skip (insn);
30200 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
30201 already in the current 16 byte page, because otherwise
30202 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
30203 bytes to reach 16 byte boundary. */
30205 || (align <= 3 && max_skip != (1 << align) - 1))
30208 fprintf (dump_file, "Label %i with max_skip %i\n",
30209 INSN_UID (insn), max_skip);
30212 while (nbytes + max_skip >= 16)
30214 start = NEXT_INSN (start);
30215 if ((JUMP_P (start)
30216 && GET_CODE (PATTERN (start)) != ADDR_VEC
30217 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
30219 njumps--, isjump = 1;
30222 nbytes -= min_insn_size (start);
30228 min_size = min_insn_size (insn);
30229 nbytes += min_size;
30231 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
30232 INSN_UID (insn), min_size);
30234 && GET_CODE (PATTERN (insn)) != ADDR_VEC
30235 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
30243 start = NEXT_INSN (start);
30244 if ((JUMP_P (start)
30245 && GET_CODE (PATTERN (start)) != ADDR_VEC
30246 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
30248 njumps--, isjump = 1;
30251 nbytes -= min_insn_size (start);
30253 gcc_assert (njumps >= 0);
30255 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
30256 INSN_UID (start), INSN_UID (insn), nbytes);
30258 if (njumps == 3 && isjump && nbytes < 16)
30260 int padsize = 15 - nbytes + min_insn_size (insn);
30263 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
30264 INSN_UID (insn), padsize);
30265 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
30271 /* AMD Athlon works faster
30272 when RET is not destination of conditional jump or directly preceded
30273 by other jump instruction. We avoid the penalty by inserting NOP just
30274 before the RET instructions in such cases. */
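/* For instance (illustrative), a penalized sequence such as

	jne	.L2
	ret

   is rewritten so the ret is no longer immediately preceded by a
   jump; the replacement emitted below is the long "rep ret" form of
   return rather than a literal nop. */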
30276 ix86_pad_returns (void)
30281 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
30283 basic_block bb = e->src;
30284 rtx ret = BB_END (bb);
30286 bool replace = false;
30288 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
30289 || optimize_bb_for_size_p (bb))
30291 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
30292 if (active_insn_p (prev) || LABEL_P (prev))
30294 if (prev && LABEL_P (prev))
30299 FOR_EACH_EDGE (e, ei, bb->preds)
30300 if (EDGE_FREQUENCY (e) && e->src->index >= 0
30301 && !(e->flags & EDGE_FALLTHRU))
30306 prev = prev_active_insn (ret);
30308 && ((JUMP_P (prev) && any_condjump_p (prev))
30311 /* Empty functions get branch mispredict even when
30312 the jump destination is not visible to us. */
30313 if (!prev && !optimize_function_for_size_p (cfun))
30318 emit_jump_insn_before (gen_return_internal_long (), ret);
30324 /* Count the minimum number of instructions in BB. Return 4 if the
30325 number of instructions >= 4. */
30328 ix86_count_insn_bb (basic_block bb)
30331 int insn_count = 0;
30333 /* Count number of instructions in this block. Return 4 if the number
30334 of instructions >= 4. */
30335 FOR_BB_INSNS (bb, insn)
30337 /* This only happens in exit blocks. */
30339 && GET_CODE (PATTERN (insn)) == RETURN)
30342 if (NONDEBUG_INSN_P (insn)
30343 && GET_CODE (PATTERN (insn)) != USE
30344 && GET_CODE (PATTERN (insn)) != CLOBBER)
30347 if (insn_count >= 4)
30356 /* Count the minimum number of instructions in code path in BB.
30357 Return 4 if the number of instructions >= 4. */
30360 ix86_count_insn (basic_block bb)
30364 int min_prev_count;
30366 /* Only bother counting instructions along paths with no
30367 more than 2 basic blocks between entry and exit. Given
30368 that BB has an edge to exit, determine if a predecessor
30369 of BB has an edge from entry. If so, compute the number
30370 of instructions in the predecessor block. If there
30371 happen to be multiple such blocks, compute the minimum. */
30372 min_prev_count = 4;
30373 FOR_EACH_EDGE (e, ei, bb->preds)
30376 edge_iterator prev_ei;
30378 if (e->src == ENTRY_BLOCK_PTR)
30380 min_prev_count = 0;
30383 FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
30385 if (prev_e->src == ENTRY_BLOCK_PTR)
30387 int count = ix86_count_insn_bb (e->src);
30388 if (count < min_prev_count)
30389 min_prev_count = count;
30395 if (min_prev_count < 4)
30396 min_prev_count += ix86_count_insn_bb (bb);
30398 return min_prev_count;
30401 /* Pad short function to 4 instructions. */
30404 ix86_pad_short_function (void)
30409 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
30411 rtx ret = BB_END (e->src);
30412 if (JUMP_P (ret) && GET_CODE (PATTERN (ret)) == RETURN)
30414 int insn_count = ix86_count_insn (e->src);
30416 /* Pad short function. */
30417 if (insn_count < 4)
30421 /* Find epilogue. */
30424 || NOTE_KIND (insn) != NOTE_INSN_EPILOGUE_BEG))
30425 insn = PREV_INSN (insn);
30430 /* Two NOPs count as one instruction. */
30431 insn_count = 2 * (4 - insn_count);
30432 emit_insn_before (gen_nops (GEN_INT (insn_count)), insn);
30438 /* Implement machine specific optimizations. We implement padding of returns
30439 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
30443 /* We are freeing block_for_insn in the toplev to keep compatibility
30444 with old MDEP_REORGS that are not CFG based. Recompute it now. */
30445 compute_bb_for_insn ();
30447 if (optimize && optimize_function_for_speed_p (cfun))
30449 if (TARGET_PAD_SHORT_FUNCTION)
30450 ix86_pad_short_function ();
30451 else if (TARGET_PAD_RETURNS)
30452 ix86_pad_returns ();
30453 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
30454 if (TARGET_FOUR_JUMP_LIMIT)
30455 ix86_avoid_jump_mispredicts ();
30459 /* Run the vzeroupper optimization if needed. */
30460 if (TARGET_VZEROUPPER)
30461 move_or_delete_vzeroupper ();
30464 /* Return nonzero when a QImode register that must be represented via a REX prefix is used. */
30467 x86_extended_QIreg_mentioned_p (rtx insn)
30470 extract_insn_cached (insn);
30471 for (i = 0; i < recog_data.n_operands; i++)
30472 if (REG_P (recog_data.operand[i])
30473 && REGNO (recog_data.operand[i]) > BX_REG)
30478 /* Return nonzero when P points to register encoded via REX prefix.
30479 Called via for_each_rtx. */
30481 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
30483 unsigned int regno;
30486 regno = REGNO (*p);
30487 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
30490 /* Return true when INSN mentions a register that must be encoded using a REX prefix. */
30493 x86_extended_reg_mentioned_p (rtx insn)
30495 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
30496 extended_reg_mentioned_1, NULL);
30499 /* If profitable, negate (without causing overflow) integer constant
30500 of mode MODE at location LOC. Return true in this case. */
30502 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
30506 if (!CONST_INT_P (*loc))
30512 /* DImode x86_64 constants must fit in 32 bits. */
30513 gcc_assert (x86_64_immediate_operand (*loc, mode));
30524 gcc_unreachable ();
30527 /* Avoid overflows. */
30528 if (mode_signbit_p (mode, *loc))
30531 val = INTVAL (*loc);
30533 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
30534 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
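/* Encoding sketch: "subl $4, %eax" and "addl $-4, %eax" are the same
   length, so the sub form is chosen purely for readability; -128 is
   the exception because it fits a sign-extended imm8 while +128 would
   force the 4-byte imm32 form. */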
30535 if ((val < 0 && val != -128)
30538 *loc = GEN_INT (-val);
30545 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
30546 optabs would emit if we didn't have TFmode patterns. */
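/* In C terms, the expansion below computes (sketch, assuming a 64-bit
   unsigned input x):

     if ((long long) x >= 0)
       result = (double) x;                        sign bit clear; convert directly
     else
       {
         unsigned long long half = (x >> 1) | (x & 1);  halve, keeping a sticky bit
         result = (double) half * 2.0;             double the rounded half
       }
 */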
30549 x86_emit_floatuns (rtx operands[2])
30551 rtx neglab, donelab, i0, i1, f0, in, out;
30552 enum machine_mode mode, inmode;
30554 inmode = GET_MODE (operands[1]);
30555 gcc_assert (inmode == SImode || inmode == DImode);
30558 in = force_reg (inmode, operands[1]);
30559 mode = GET_MODE (out);
30560 neglab = gen_label_rtx ();
30561 donelab = gen_label_rtx ();
30562 f0 = gen_reg_rtx (mode);
30564 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
30566 expand_float (out, in, 0);
30568 emit_jump_insn (gen_jump (donelab));
30571 emit_label (neglab);
30573 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
30575 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
30577 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
30579 expand_float (f0, i0, 0);
30581 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
30583 emit_label (donelab);
30586 /* AVX does not support 32-byte integer vector operations,
30587 thus the longest vector we are faced with is V16QImode. */
30588 #define MAX_VECT_LEN 16
30590 struct expand_vec_perm_d
30592 rtx target, op0, op1;
30593 unsigned char perm[MAX_VECT_LEN];
30594 enum machine_mode vmode;
30595 unsigned char nelt;
30599 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
30600 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
30602 /* Get a vector mode of the same size as the original but with elements
30603 twice as wide. This is only guaranteed to apply to integral vectors. */
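/* For instance, V8HImode (eight 16-bit lanes) yields V4SImode (four
   32-bit lanes), both 128 bits wide. */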
30605 static inline enum machine_mode
30606 get_mode_wider_vector (enum machine_mode o)
30608 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
30609 enum machine_mode n = GET_MODE_WIDER_MODE (o);
30610 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
30611 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
30615 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30616 with all elements equal to VAR. Return true if successful. */
30619 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
30620 rtx target, rtx val)
30643 /* First attempt to recognize VAL as-is. */
30644 dup = gen_rtx_VEC_DUPLICATE (mode, val);
30645 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
30646 if (recog_memoized (insn) < 0)
30649 /* If that fails, force VAL into a register. */
30652 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
30653 seq = get_insns ();
30656 emit_insn_before (seq, insn);
30658 ok = recog_memoized (insn) >= 0;
30667 if (TARGET_SSE || TARGET_3DNOW_A)
30671 val = gen_lowpart (SImode, val);
30672 x = gen_rtx_TRUNCATE (HImode, val);
30673 x = gen_rtx_VEC_DUPLICATE (mode, x);
30674 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30687 struct expand_vec_perm_d dperm;
30691 memset (&dperm, 0, sizeof (dperm));
30692 dperm.target = target;
30693 dperm.vmode = mode;
30694 dperm.nelt = GET_MODE_NUNITS (mode);
30695 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
30697 /* Extend to SImode using a paradoxical SUBREG. */
30698 tmp1 = gen_reg_rtx (SImode);
30699 emit_move_insn (tmp1, gen_lowpart (SImode, val));
30701 /* Insert the SImode value as low element of a V4SImode vector. */
30702 tmp2 = gen_lowpart (V4SImode, dperm.op0);
30703 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
30705 ok = (expand_vec_perm_1 (&dperm)
30706 || expand_vec_perm_broadcast_1 (&dperm));
30718 /* Replicate the value once into the next wider mode and recurse. */
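/* E.g. (illustrative values) broadcasting the QImode byte 0xab:
   widen to HImode as 0xab | (0xab << 8) == 0xabab, then broadcast
   0xabab across V8HImode. */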
30720 enum machine_mode smode, wsmode, wvmode;
30723 smode = GET_MODE_INNER (mode);
30724 wvmode = get_mode_wider_vector (mode);
30725 wsmode = GET_MODE_INNER (wvmode);
30727 val = convert_modes (wsmode, smode, val, true);
30728 x = expand_simple_binop (wsmode, ASHIFT, val,
30729 GEN_INT (GET_MODE_BITSIZE (smode)),
30730 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30731 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
30733 x = gen_lowpart (wvmode, target);
30734 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
30742 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
30743 rtx x = gen_reg_rtx (hvmode);
30745 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
30748 x = gen_rtx_VEC_CONCAT (mode, x, x);
30749 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30758 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30759 whose ONE_VAR element is VAR, and other elements are zero. Return true if successful. */
30763 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
30764 rtx target, rtx var, int one_var)
30766 enum machine_mode vsimode;
30769 bool use_vector_set = false;
30774 /* For SSE4.1, we normally use vector set. But if the second
30775 element is zero and inter-unit moves are OK, we use movq instead. */
30777 use_vector_set = (TARGET_64BIT
30779 && !(TARGET_INTER_UNIT_MOVES
30785 use_vector_set = TARGET_SSE4_1;
30788 use_vector_set = TARGET_SSE2;
30791 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
30798 use_vector_set = TARGET_AVX;
30801 /* Use ix86_expand_vector_set in 64bit mode only. */
30802 use_vector_set = TARGET_AVX && TARGET_64BIT;
30808 if (use_vector_set)
30810 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
30811 var = force_reg (GET_MODE_INNER (mode), var);
30812 ix86_expand_vector_set (mmx_ok, target, var, one_var);
30828 var = force_reg (GET_MODE_INNER (mode), var);
30829 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
30830 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30835 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
30836 new_target = gen_reg_rtx (mode);
30838 new_target = target;
30839 var = force_reg (GET_MODE_INNER (mode), var);
30840 x = gen_rtx_VEC_DUPLICATE (mode, var);
30841 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
30842 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
30845 /* We need to shuffle the value to the correct position, so
30846 create a new pseudo to store the intermediate result. */
30848 /* With SSE2, we can use the integer shuffle insns. */
30849 if (mode != V4SFmode && TARGET_SSE2)
30851 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
30853 GEN_INT (one_var == 1 ? 0 : 1),
30854 GEN_INT (one_var == 2 ? 0 : 1),
30855 GEN_INT (one_var == 3 ? 0 : 1)));
30856 if (target != new_target)
30857 emit_move_insn (target, new_target);
30861 /* Otherwise convert the intermediate result to V4SFmode and
30862 use the SSE1 shuffle instructions. */
30863 if (mode != V4SFmode)
30865 tmp = gen_reg_rtx (V4SFmode);
30866 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
30871 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
30873 GEN_INT (one_var == 1 ? 0 : 1),
30874 GEN_INT (one_var == 2 ? 0+4 : 1+4),
30875 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
30877 if (mode != V4SFmode)
30878 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
30879 else if (tmp != target)
30880 emit_move_insn (target, tmp);
30882 else if (target != new_target)
30883 emit_move_insn (target, new_target);
30888 vsimode = V4SImode;
30894 vsimode = V2SImode;
30900 /* Zero extend the variable element to SImode and recurse. */
30901 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
30903 x = gen_reg_rtx (vsimode);
30904 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
30906 gcc_unreachable ();
30908 emit_move_insn (target, gen_lowpart (mode, x));
30916 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30917 consisting of the values in VALS. It is known that all elements
30918 except ONE_VAR are constants. Return true if successful. */
30921 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
30922 rtx target, rtx vals, int one_var)
30924 rtx var = XVECEXP (vals, 0, one_var);
30925 enum machine_mode wmode;
30928 const_vec = copy_rtx (vals);
30929 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
30930 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
30938 /* For the two element vectors, it's just as easy to use
30939 the general case. */
30943 /* Use ix86_expand_vector_set in 64bit mode only. */
30966 /* There's no way to set one QImode entry easily. Combine
30967 the variable value with its adjacent constant value, and
30968 promote to an HImode set. */
30969 x = XVECEXP (vals, 0, one_var ^ 1);
30972 var = convert_modes (HImode, QImode, var, true);
30973 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
30974 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30975 x = GEN_INT (INTVAL (x) & 0xff);
30979 var = convert_modes (HImode, QImode, var, true);
30980 x = gen_int_mode (INTVAL (x) << 8, HImode);
30982 if (x != const0_rtx)
30983 var = expand_simple_binop (HImode, IOR, var, x, var,
30984 1, OPTAB_LIB_WIDEN);
30986 x = gen_reg_rtx (wmode);
30987 emit_move_insn (x, gen_lowpart (wmode, const_vec));
30988 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
30990 emit_move_insn (target, gen_lowpart (mode, x));
30997 emit_move_insn (target, const_vec);
30998 ix86_expand_vector_set (mmx_ok, target, var, one_var);
31002 /* A subroutine of ix86_expand_vector_init_general. Use vector
31003 concatenate to handle the most general case: all values variable,
31004 and none identical. */
31007 ix86_expand_vector_init_concat (enum machine_mode mode,
31008 rtx target, rtx *ops, int n)
31010 enum machine_mode cmode, hmode = VOIDmode;
31011 rtx first[8], second[4];
31051 gcc_unreachable ();
31054 if (!register_operand (ops[1], cmode))
31055 ops[1] = force_reg (cmode, ops[1]);
31056 if (!register_operand (ops[0], cmode))
31057 ops[0] = force_reg (cmode, ops[0]);
31058 emit_insn (gen_rtx_SET (VOIDmode, target,
31059 gen_rtx_VEC_CONCAT (mode, ops[0],
31079 gcc_unreachable ();
31095 gcc_unreachable ();
31100 /* FIXME: We process inputs backward to help RA. PR 36222. */
31103 for (; i > 0; i -= 2, j--)
31105 first[j] = gen_reg_rtx (cmode);
31106 v = gen_rtvec (2, ops[i - 1], ops[i]);
31107 ix86_expand_vector_init (false, first[j],
31108 gen_rtx_PARALLEL (cmode, v));
31114 gcc_assert (hmode != VOIDmode);
31115 for (i = j = 0; i < n; i += 2, j++)
31117 second[j] = gen_reg_rtx (hmode);
31118 ix86_expand_vector_init_concat (hmode, second [j],
31122 ix86_expand_vector_init_concat (mode, target, second, n);
31125 ix86_expand_vector_init_concat (mode, target, first, n);
31129 gcc_unreachable ();
31133 /* A subroutine of ix86_expand_vector_init_general. Use vector
31134 interleave to handle the most general case: all values variable,
31135 and none identical. */
31138 ix86_expand_vector_init_interleave (enum machine_mode mode,
31139 rtx target, rtx *ops, int n)
31141 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
31144 rtx (*gen_load_even) (rtx, rtx, rtx);
31145 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
31146 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
31151 gen_load_even = gen_vec_setv8hi;
31152 gen_interleave_first_low = gen_vec_interleave_lowv4si;
31153 gen_interleave_second_low = gen_vec_interleave_lowv2di;
31154 inner_mode = HImode;
31155 first_imode = V4SImode;
31156 second_imode = V2DImode;
31157 third_imode = VOIDmode;
31160 gen_load_even = gen_vec_setv16qi;
31161 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
31162 gen_interleave_second_low = gen_vec_interleave_lowv4si;
31163 inner_mode = QImode;
31164 first_imode = V8HImode;
31165 second_imode = V4SImode;
31166 third_imode = V2DImode;
31169 gcc_unreachable ();
31172 for (i = 0; i < n; i++)
31174 /* Extend the odd element to SImode using a paradoxical SUBREG. */
31175 op0 = gen_reg_rtx (SImode);
31176 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
31178 /* Insert the SImode value as low element of V4SImode vector. */
31179 op1 = gen_reg_rtx (V4SImode);
31180 op0 = gen_rtx_VEC_MERGE (V4SImode,
31181 gen_rtx_VEC_DUPLICATE (V4SImode,
31183 CONST0_RTX (V4SImode),
31185 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
31187 /* Cast the V4SImode vector back to a vector in the original mode. */
31188 op0 = gen_reg_rtx (mode);
31189 emit_move_insn (op0, gen_lowpart (mode, op1));
31191 /* Load even elements into the second position. */
31192 emit_insn (gen_load_even (op0,
31193 force_reg (inner_mode,
31197 /* Cast vector to FIRST_IMODE vector. */
31198 ops[i] = gen_reg_rtx (first_imode);
31199 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
31202 /* Interleave low FIRST_IMODE vectors. */
31203 for (i = j = 0; i < n; i += 2, j++)
31205 op0 = gen_reg_rtx (first_imode);
31206 emit_insn (gen_interleave_first_low (op0, ops[i], ops[i + 1]));
31208 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
31209 ops[j] = gen_reg_rtx (second_imode);
31210 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
31213 /* Interleave low SECOND_IMODE vectors. */
31214 switch (second_imode)
31217 for (i = j = 0; i < n / 2; i += 2, j++)
31219 op0 = gen_reg_rtx (second_imode);
31220 emit_insn (gen_interleave_second_low (op0, ops[i],
31223 /* Cast the SECOND_IMODE vector to the THIRD_IMODE vector. */
31225 ops[j] = gen_reg_rtx (third_imode);
31226 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
31228 second_imode = V2DImode;
31229 gen_interleave_second_low = gen_vec_interleave_lowv2di;
31233 op0 = gen_reg_rtx (second_imode);
31234 emit_insn (gen_interleave_second_low (op0, ops[0],
31237 /* Cast the SECOND_IMODE vector back to a vector in the original mode. */
31239 emit_insn (gen_rtx_SET (VOIDmode, target,
31240 gen_lowpart (mode, op0)));
31244 gcc_unreachable ();
31248 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
31249 all values variable, and none identical. */
31252 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
31253 rtx target, rtx vals)
31255 rtx ops[32], op0, op1;
31256 enum machine_mode half_mode = VOIDmode;
31263 if (!mmx_ok && !TARGET_SSE)
31275 n = GET_MODE_NUNITS (mode);
31276 for (i = 0; i < n; i++)
31277 ops[i] = XVECEXP (vals, 0, i);
31278 ix86_expand_vector_init_concat (mode, target, ops, n);
31282 half_mode = V16QImode;
31286 half_mode = V8HImode;
31290 n = GET_MODE_NUNITS (mode);
31291 for (i = 0; i < n; i++)
31292 ops[i] = XVECEXP (vals, 0, i);
31293 op0 = gen_reg_rtx (half_mode);
31294 op1 = gen_reg_rtx (half_mode);
31295 ix86_expand_vector_init_interleave (half_mode, op0, ops,
31297 ix86_expand_vector_init_interleave (half_mode, op1,
31298 &ops [n >> 1], n >> 2);
31299 emit_insn (gen_rtx_SET (VOIDmode, target,
31300 gen_rtx_VEC_CONCAT (mode, op0, op1)));
31304 if (!TARGET_SSE4_1)
31312 /* Don't use ix86_expand_vector_init_interleave if we can't
31313 move from GPR to SSE register directly. */
31314 if (!TARGET_INTER_UNIT_MOVES)
31317 n = GET_MODE_NUNITS (mode);
31318 for (i = 0; i < n; i++)
31319 ops[i] = XVECEXP (vals, 0, i);
31320 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
31328 gcc_unreachable ();
31332 int i, j, n_elts, n_words, n_elt_per_word;
31333 enum machine_mode inner_mode;
31334 rtx words[4], shift;
31336 inner_mode = GET_MODE_INNER (mode);
31337 n_elts = GET_MODE_NUNITS (mode);
31338 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
31339 n_elt_per_word = n_elts / n_words;
31340 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
31342 for (i = 0; i < n_words; ++i)
31344 rtx word = NULL_RTX;
31346 for (j = 0; j < n_elt_per_word; ++j)
31348 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
31349 elt = convert_modes (word_mode, inner_mode, elt, true);
31355 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
31356 word, 1, OPTAB_LIB_WIDEN);
31357 word = expand_simple_binop (word_mode, IOR, word, elt,
31358 word, 1, OPTAB_LIB_WIDEN);
31366 emit_move_insn (target, gen_lowpart (mode, words[0]));
31367 else if (n_words == 2)
31369 rtx tmp = gen_reg_rtx (mode);
31370 emit_clobber (tmp);
31371 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
31372 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
31373 emit_move_insn (target, tmp);
31375 else if (n_words == 4)
31377 rtx tmp = gen_reg_rtx (V4SImode);
31378 gcc_assert (word_mode == SImode);
31379 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
31380 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
31381 emit_move_insn (target, gen_lowpart (mode, tmp));
31384 gcc_unreachable ();
31388 /* Initialize vector TARGET via VALS. Suppress the use of MMX
31389 instructions unless MMX_OK is true. */
31392 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
31394 enum machine_mode mode = GET_MODE (target);
31395 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31396 int n_elts = GET_MODE_NUNITS (mode);
31397 int n_var = 0, one_var = -1;
31398 bool all_same = true, all_const_zero = true;
31402 for (i = 0; i < n_elts; ++i)
31404 x = XVECEXP (vals, 0, i);
31405 if (!(CONST_INT_P (x)
31406 || GET_CODE (x) == CONST_DOUBLE
31407 || GET_CODE (x) == CONST_FIXED))
31408 n_var++, one_var = i;
31409 else if (x != CONST0_RTX (inner_mode))
31410 all_const_zero = false;
31411 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
31415 /* Constants are best loaded from the constant pool. */
31418 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
31422 /* If all values are identical, broadcast the value. */
31424 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
31425 XVECEXP (vals, 0, 0)))
31428 /* Values where only one field is non-constant are best loaded from
31429 the pool and overwritten via move later. */
31433 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
31434 XVECEXP (vals, 0, one_var),
31438 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
31442 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
31446 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
31448 enum machine_mode mode = GET_MODE (target);
31449 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31450 enum machine_mode half_mode;
31451 bool use_vec_merge = false;
31453 static rtx (*gen_extract[6][2]) (rtx, rtx)
31455 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
31456 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
31457 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
31458 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
31459 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
31460 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
31462 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
31464 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
31465 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
31466 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
31467 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
31468 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
31469 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
31479 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
31480 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
31482 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
31484 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
31485 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31491 use_vec_merge = TARGET_SSE4_1 && TARGET_64BIT;
31495 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
31496 ix86_expand_vector_extract (false, tmp, target, 1 - elt);
31498 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
31500 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
31501 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31508 /* For the two element vectors, we implement a VEC_CONCAT with
31509 the extraction of the other element. */
31511 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
31512 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
31515 op0 = val, op1 = tmp;
31517 op0 = tmp, op1 = val;
31519 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
31520 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31525 use_vec_merge = TARGET_SSE4_1;
31532 use_vec_merge = true;
31536 /* tmp = target = A B C D */
31537 tmp = copy_to_reg (target);
31538 /* target = A A B B */
31539 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
31540 /* target = X A B B */
31541 ix86_expand_vector_set (false, target, val, 0);
31542 /* target = A X C D */
31543 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31544 const1_rtx, const0_rtx,
31545 GEN_INT (2+4), GEN_INT (3+4)));
31549 /* tmp = target = A B C D */
31550 tmp = copy_to_reg (target);
31551 /* tmp = X B C D */
31552 ix86_expand_vector_set (false, tmp, val, 0);
31553 /* target = A B X D */
31554 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31555 const0_rtx, const1_rtx,
31556 GEN_INT (0+4), GEN_INT (3+4)));
31560 /* tmp = target = A B C D */
31561 tmp = copy_to_reg (target);
31562 /* tmp = X B C D */
31563 ix86_expand_vector_set (false, tmp, val, 0);
31564 /* target = A B C X */
31565 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31566 const0_rtx, const1_rtx,
31567 GEN_INT (2+4), GEN_INT (0+4)));
31571 gcc_unreachable ();
31576 use_vec_merge = TARGET_SSE4_1;
31580 /* Element 0 handled by vec_merge below. */
31583 use_vec_merge = true;
31589 /* With SSE2, use integer shuffles to swap element 0 and ELT,
31590 store into element 0, then shuffle them back. */
31594 order[0] = GEN_INT (elt);
31595 order[1] = const1_rtx;
31596 order[2] = const2_rtx;
31597 order[3] = GEN_INT (3);
31598 order[elt] = const0_rtx;
31600 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
31601 order[1], order[2], order[3]));
31603 ix86_expand_vector_set (false, target, val, 0);
31605 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
31606 order[1], order[2], order[3]));
31610 /* For SSE1, we have to reuse the V4SF code. */
31611 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
31612 gen_lowpart (SFmode, val), elt);
31617 use_vec_merge = TARGET_SSE2;
31620 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
31624 use_vec_merge = TARGET_SSE4_1;
31631 half_mode = V16QImode;
31637 half_mode = V8HImode;
31643 half_mode = V4SImode;
31649 half_mode = V2DImode;
31655 half_mode = V4SFmode;
31661 half_mode = V2DFmode;
31667 /* Compute offset. */
31671 gcc_assert (i <= 1);
31673 /* Extract the half. */
31674 tmp = gen_reg_rtx (half_mode);
31675 emit_insn (gen_extract[j][i] (tmp, target));
31677 /* Put val in tmp at elt. */
31678 ix86_expand_vector_set (false, tmp, val, elt);
31681 emit_insn (gen_insert[j][i] (target, target, tmp));
31690 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
31691 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
31692 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31696 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31698 emit_move_insn (mem, target);
31700 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31701 emit_move_insn (tmp, val);
31703 emit_move_insn (target, mem);
31708 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
31710 enum machine_mode mode = GET_MODE (vec);
31711 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31712 bool use_vec_extr = false;
31725 use_vec_extr = true;
31729 use_vec_extr = TARGET_SSE4_1;
31741 tmp = gen_reg_rtx (mode);
31742 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
31743 GEN_INT (elt), GEN_INT (elt),
31744 GEN_INT (elt+4), GEN_INT (elt+4)));
31748 tmp = gen_reg_rtx (mode);
31749 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
31753 gcc_unreachable ();
31756 use_vec_extr = true;
31761 use_vec_extr = TARGET_SSE4_1;
31775 tmp = gen_reg_rtx (mode);
31776 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
31777 GEN_INT (elt), GEN_INT (elt),
31778 GEN_INT (elt), GEN_INT (elt)));
31782 tmp = gen_reg_rtx (mode);
31783 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
31787 gcc_unreachable ();
31790 use_vec_extr = true;
31795 /* For SSE1, we have to reuse the V4SF code. */
31796 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
31797 gen_lowpart (V4SFmode, vec), elt);
31803 use_vec_extr = TARGET_SSE2;
31806 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
31810 use_vec_extr = TARGET_SSE4_1;
31814 /* ??? Could extract the appropriate HImode element and shift. */
31821 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
31822 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
31824 /* Let the rtl optimizers know about the zero extension performed. */
31825 if (inner_mode == QImode || inner_mode == HImode)
31827 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
31828 target = gen_lowpart (SImode, target);
31831 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31835 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31837 emit_move_insn (mem, vec);
31839 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31840 emit_move_insn (target, tmp);
31844 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
31845 pattern to reduce; DEST is the destination; IN is the input vector. */
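/* Lane-by-lane sketch (illustrative) for IN == [a b c d] and binary
   operation f:
	tmp1 = movhlps (in, in)       == [c d c d]
	tmp2 = f (tmp1, in)           == [f(c,a) f(d,b) ...]
	tmp3 = splat of tmp2 lane 1   == [f(d,b) f(d,b) ...]
	dest = f (tmp2, tmp3), so lane 0 == f (f (c, a), f (d, b)). */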
31848 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
31850 rtx tmp1, tmp2, tmp3;
31852 tmp1 = gen_reg_rtx (V4SFmode);
31853 tmp2 = gen_reg_rtx (V4SFmode);
31854 tmp3 = gen_reg_rtx (V4SFmode);
31856 emit_insn (gen_sse_movhlps (tmp1, in, in));
31857 emit_insn (fn (tmp2, tmp1, in));
31859 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
31860 const1_rtx, const1_rtx,
31861 GEN_INT (1+4), GEN_INT (1+4)));
31862 emit_insn (fn (dest, tmp2, tmp3));
31865 /* Target hook for scalar_mode_supported_p. */
31867 ix86_scalar_mode_supported_p (enum machine_mode mode)
31869 if (DECIMAL_FLOAT_MODE_P (mode))
31870 return default_decimal_float_supported_p ();
31871 else if (mode == TFmode)
31874 return default_scalar_mode_supported_p (mode);
31877 /* Implements target hook vector_mode_supported_p. */
31879 ix86_vector_mode_supported_p (enum machine_mode mode)
31881 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
31883 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
31885 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
31887 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
31889 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
31894 /* Target hook for c_mode_for_suffix. */
31895 static enum machine_mode
31896 ix86_c_mode_for_suffix (char suffix)
31906 /* Worker function for TARGET_MD_ASM_CLOBBERS.
31908 We do this in the new i386 backend to maintain source compatibility
31909 with the old cc0-based compiler. */
31912 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
31913 tree inputs ATTRIBUTE_UNUSED,
31916 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
31918 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
31923 /* Implements the target hook targetm.asm.encode_section_info. This
31924 is not used by NetWare. */
31926 static void ATTRIBUTE_UNUSED
31927 ix86_encode_section_info (tree decl, rtx rtl, int first)
31929 default_encode_section_info (decl, rtl, first);
31931 if (TREE_CODE (decl) == VAR_DECL
31932 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
31933 && ix86_in_large_data_p (decl))
31934 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
31937 /* Worker function for REVERSE_CONDITION. */
31940 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
31942 return (mode != CCFPmode && mode != CCFPUmode
31943 ? reverse_condition (code)
31944 : reverse_condition_maybe_unordered (code));
31947 /* Output code to perform an x87 FP register move, from OPERANDS[1] to OPERANDS[0]. */
31951 output_387_reg_move (rtx insn, rtx *operands)
31953 if (REG_P (operands[0]))
31955 if (REG_P (operands[1])
31956 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31958 if (REGNO (operands[0]) == FIRST_STACK_REG)
31959 return output_387_ffreep (operands, 0);
31960 return "fstp\t%y0";
31962 if (STACK_TOP_P (operands[0]))
31963 return "fld%Z1\t%y1";
31966 else if (MEM_P (operands[0]))
31968 gcc_assert (REG_P (operands[1]));
31969 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31970 return "fstp%Z0\t%y0";
31973 /* There is no non-popping store to memory for XFmode.
31974 So if we need one, follow the store with a load. */
31975 if (GET_MODE (operands[0]) == XFmode)
31976 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
31978 return "fst%Z0\t%y0";
31985 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
31986 the FP status register is set. */
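/* Roughly (illustrative asm), the two expansions below are:

	fnstsw %ax ; sahf          ; jp  label    (with usable SAHF)
	fnstsw %ax ; testb $4, %ah ; jne label    (without)

   C2 is bit 10 of the FP status word, i.e. bit 2 of %ah after
   fnstsw, hence the 0x04 mask. */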
31989 ix86_emit_fp_unordered_jump (rtx label)
31991 rtx reg = gen_reg_rtx (HImode);
31994 emit_insn (gen_x86_fnstsw_1 (reg));
31996 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
31998 emit_insn (gen_x86_sahf_1 (reg));
32000 temp = gen_rtx_REG (CCmode, FLAGS_REG);
32001 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
32005 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
32007 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
32008 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
32011 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
32012 gen_rtx_LABEL_REF (VOIDmode, label),
32014 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
32016 emit_jump_insn (temp);
32017 predict_jump (REG_BR_PROB_BASE * 10 / 100);
32020 /* Output code to perform a log1p XFmode calculation. */
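/* A sketch of the math: log1p(x) == ln(2) * log2(1 + x). fyl2xp1 is
   only defined for |x| < 1 - sqrt(2)/2 ~= 0.29289321881..., which is
   exactly the constant tested below; outside that range we fall back
   to fyl2x on the explicitly formed 1 + x. In both cases the y
   operand is fldln2 == ln(2). */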
32022 void ix86_emit_i387_log1p (rtx op0, rtx op1)
32024 rtx label1 = gen_label_rtx ();
32025 rtx label2 = gen_label_rtx ();
32027 rtx tmp = gen_reg_rtx (XFmode);
32028 rtx tmp2 = gen_reg_rtx (XFmode);
32031 emit_insn (gen_absxf2 (tmp, op1));
32032 test = gen_rtx_GE (VOIDmode, tmp,
32033 CONST_DOUBLE_FROM_REAL_VALUE (
32034 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
32036 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
32038 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
32039 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
32040 emit_jump (label2);
32042 emit_label (label1);
32043 emit_move_insn (tmp, CONST1_RTX (XFmode));
32044 emit_insn (gen_addxf3 (tmp, op1, tmp));
32045 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
32046 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
32048 emit_label (label2);
32051 /* Output code to perform a Newton-Raphson approximation of a single precision
32052 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
32054 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
32056 rtx x0, x1, e0, e1;
32058 x0 = gen_reg_rtx (mode);
32059 e0 = gen_reg_rtx (mode);
32060 e1 = gen_reg_rtx (mode);
32061 x1 = gen_reg_rtx (mode);
32063 /* a / b = a * ((rcp(b) + rcp(b)) - (b * rcp(b) * rcp (b))) */
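/* The formula above is one Newton-Raphson step for the reciprocal:
   with x0 ~= rcp(b), x1 = x0 * (2 - b * x0) = (x0 + x0) - (b * x0 * x0),
   roughly doubling the ~12 correct bits of the rcpss estimate. */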
32065 /* x0 = rcp(b) estimate */
32066 emit_insn (gen_rtx_SET (VOIDmode, x0,
32067 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
32070 emit_insn (gen_rtx_SET (VOIDmode, e0,
32071 gen_rtx_MULT (mode, x0, b)));
32074 emit_insn (gen_rtx_SET (VOIDmode, e0,
32075 gen_rtx_MULT (mode, x0, e0)));
32078 emit_insn (gen_rtx_SET (VOIDmode, e1,
32079 gen_rtx_PLUS (mode, x0, x0)));
32082 emit_insn (gen_rtx_SET (VOIDmode, x1,
32083 gen_rtx_MINUS (mode, e1, e0)));
32086 emit_insn (gen_rtx_SET (VOIDmode, res,
32087 gen_rtx_MULT (mode, a, x1)));
32090 /* Output code to perform a Newton-Raphson approximation of a
32091 single precision floating point [reciprocal] square root. */
32093 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
32096 rtx x0, e0, e1, e2, e3, mthree, mhalf;
32099 x0 = gen_reg_rtx (mode);
32100 e0 = gen_reg_rtx (mode);
32101 e1 = gen_reg_rtx (mode);
32102 e2 = gen_reg_rtx (mode);
32103 e3 = gen_reg_rtx (mode);
32105 real_from_integer (&r, VOIDmode, -3, -1, 0);
32106 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
32108 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
32109 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
32111 if (VECTOR_MODE_P (mode))
32113 mthree = ix86_build_const_vector (mode, true, mthree);
32114 mhalf = ix86_build_const_vector (mode, true, mhalf);
32117 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
32118 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
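/* Likewise one Newton-Raphson step, here for 1/sqrt(a): with
   x0 ~= rsqrt(a), x1 = 0.5 * x0 * (3 - a * x0 * x0), written as
   -0.5 * x0 * (a * x0 * x0 - 3) to match the expansion below. */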
32120 /* x0 = rsqrt(a) estimate */
32121 emit_insn (gen_rtx_SET (VOIDmode, x0,
32122 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
32125 /* If a == 0.0, filter out the infinite rsqrt estimate so that sqrt(0.0) is 0.0 rather than NaN. */
32130 zero = gen_reg_rtx (mode);
32131 mask = gen_reg_rtx (mode);
32133 zero = force_reg (mode, CONST0_RTX(mode));
32134 emit_insn (gen_rtx_SET (VOIDmode, mask,
32135 gen_rtx_NE (mode, zero, a)));
32137 emit_insn (gen_rtx_SET (VOIDmode, x0,
32138 gen_rtx_AND (mode, x0, mask)));
32142 emit_insn (gen_rtx_SET (VOIDmode, e0,
32143 gen_rtx_MULT (mode, x0, a)));
32145 emit_insn (gen_rtx_SET (VOIDmode, e1,
32146 gen_rtx_MULT (mode, e0, x0)));
32149 mthree = force_reg (mode, mthree);
32150 emit_insn (gen_rtx_SET (VOIDmode, e2,
32151 gen_rtx_PLUS (mode, e1, mthree)));
32153 mhalf = force_reg (mode, mhalf);
32155 /* e3 = -.5 * x0 */
32156 emit_insn (gen_rtx_SET (VOIDmode, e3,
32157 gen_rtx_MULT (mode, x0, mhalf)));
32159 /* e3 = -.5 * e0 */
32160 emit_insn (gen_rtx_SET (VOIDmode, e3,
32161 gen_rtx_MULT (mode, e0, mhalf)));
32162 /* ret = e2 * e3 */
32163 emit_insn (gen_rtx_SET (VOIDmode, res,
32164 gen_rtx_MULT (mode, e2, e3)));
32167 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
32169 static void ATTRIBUTE_UNUSED
32170 i386_solaris_elf_named_section (const char *name, unsigned int flags,
32173 /* With Binutils 2.15, the "@unwind" marker must be specified on
32174 every occurrence of the ".eh_frame" section, not just the first one. */
32177 && strcmp (name, ".eh_frame") == 0)
32179 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
32180 flags & SECTION_WRITE ? "aw" : "a");
32183 default_elf_asm_named_section (name, flags, decl);
32186 /* Return the mangling of TYPE if it is an extended fundamental type. */
32188 static const char *
32189 ix86_mangle_type (const_tree type)
32191 type = TYPE_MAIN_VARIANT (type);
32193 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32194 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32197 switch (TYPE_MODE (type))
32200 /* __float128 is "g". */
32203 /* "long double" or __float80 is "e". */
32210 /* For 32-bit code we can save PIC register setup by using
32211 __stack_chk_fail_local hidden function instead of calling
32212 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
32213 register, so it is better to call __stack_chk_fail directly. */
32216 ix86_stack_protect_fail (void)
32218 return TARGET_64BIT
32219 ? default_external_stack_protect_fail ()
32220 : default_hidden_stack_protect_fail ();
32223 /* Select a format to encode pointers in exception handling data. CODE
32224 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
32225 true if the symbol may be affected by dynamic relocations.
32227 ??? All x86 object file formats are capable of representing this.
32228 After all, the relocation needed is the same as for the call insn.
32229 Whether or not a particular assembler allows us to enter such, I
32230 guess we'll have to see. */
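/* For example (an illustrative note, not in the original sources): 64-bit
   PIC code with the small PIC model encodes non-global code labels as
   DW_EH_PE_pcrel | DW_EH_PE_sdata4, while a global symbol additionally
   gets DW_EH_PE_indirect so that any dynamic relocation is applied to a
   pointer outside the read-only EH data.  */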
32232 asm_preferred_eh_data_format (int code, int global)
  if (flag_pic)
32236 int type = DW_EH_PE_sdata8;
      if (!TARGET_64BIT
32238 || ix86_cmodel == CM_SMALL_PIC
32239 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
32240 type = DW_EH_PE_sdata4;
32241 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
32243 if (ix86_cmodel == CM_SMALL
32244 || (ix86_cmodel == CM_MEDIUM && code))
32245 return DW_EH_PE_udata4;
32246 return DW_EH_PE_absptr;
32249 /* Expand copysign from SIGN to the positive value ABS_VALUE
32250    storing in RESULT.  If MASK is non-null, it shall be a mask to mask out
   the sign bit, as produced by ix86_expand_sse_fabs.  */
32253 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
32255 enum machine_mode mode = GET_MODE (sign);
32256 rtx sgn = gen_reg_rtx (mode);
32257 if (mask == NULL_RTX)
32259 enum machine_mode vmode;
32261 if (mode == SFmode)
	vmode = V4SFmode;
32263 else if (mode == DFmode)
	vmode = V2DFmode;
      else
	vmode = mode;
32268 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), false);
32269 if (!VECTOR_MODE_P (mode))
32271 /* We need to generate a scalar mode mask in this case. */
32272 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
32273 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
32274 mask = gen_reg_rtx (mode);
32275 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
  else
32279 mask = gen_rtx_NOT (mode, mask);
32280 emit_insn (gen_rtx_SET (VOIDmode, sgn,
32281 gen_rtx_AND (mode, mask, sign)));
32282 emit_insn (gen_rtx_SET (VOIDmode, result,
32283 gen_rtx_IOR (mode, abs_value, sgn)));
32286 /* Expand fabs (OP0) and return a new rtx that holds the result. The
32287 mask for masking out the sign-bit is stored in *SMASK, if that is
   non-null.  */
32290 ix86_expand_sse_fabs (rtx op0, rtx *smask)
32292 enum machine_mode vmode, mode = GET_MODE (op0);
32295 xa = gen_reg_rtx (mode);
32296 if (mode == SFmode)
    vmode = V4SFmode;
32298 else if (mode == DFmode)
    vmode = V2DFmode;
  else
    vmode = mode;
32302 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), true);
32303 if (!VECTOR_MODE_P (mode))
32305 /* We need to generate a scalar mode mask in this case. */
32306 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
32307 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
32308 mask = gen_reg_rtx (mode);
32309 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
32311 emit_insn (gen_rtx_SET (VOIDmode, xa,
32312 gen_rtx_AND (mode, op0, mask)));
  if (smask)
    *smask = mask;

  return xa;
32320 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
32321 swapping the operands if SWAP_OPERANDS is true. The expanded
32322 code is a forward jump to a newly created label in case the
32323 comparison is true. The generated label rtx is returned. */
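/* For example (an illustrative note, not in the original sources): the
   rounding expanders below implement "if (!isless (xa, TWO52)) goto label"
   by passing code UNLE with operands (TWO52, xa); the jump is taken when
   TWO52 <= xa or when the operands are unordered, i.e. exactly when
   isless (xa, TWO52) is false.  */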
32325 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
32326 bool swap_operands)
32337 label = gen_label_rtx ();
32338 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
32339 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32340 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
32341 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
32342 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
32343 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
32344 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
32345 JUMP_LABEL (tmp) = label;
  return label;
32350 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
32351 using comparison code CODE. Operands are swapped for the comparison if
32352 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
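/* For example (an illustrative note, not in the original sources): the
   floor/ceil expanders below AND the all-ones/all-zeros mask produced
   here with the constant 1.0, yielding the 0.0-or-1.0 compensation term

	tmp = (xa UNGT res) ? 1.0 : 0.0;  */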
32354 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
32355 bool swap_operands)
32357 rtx (*insn)(rtx, rtx, rtx, rtx);
32358 enum machine_mode mode = GET_MODE (op0);
32359 rtx mask = gen_reg_rtx (mode);
32368 insn = mode == DFmode ? gen_setcc_df_sse : gen_setcc_sf_sse;
32370 emit_insn (insn (mask, op0, op1,
32371 gen_rtx_fmt_ee (code, mode, op0, op1)));
  return mask;
32375 /* Generate and return a rtx of mode MODE for 2**n where n is the number
32376 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
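/* Why this constant works (an illustrative note, not in the original
   sources): once a non-negative double x < 2**52 is added to 2**52, the
   sum has unit ULP, so the addition rounds away the fraction and the
   subtraction leaves the nearest integer behind, e.g.

	(2.7 + 0x1p52) - 0x1p52  ==  3.0
	(2.3 + 0x1p52) - 0x1p52  ==  2.0

   SFmode uses 2**23 for the same reason (23 explicit mantissa bits).  */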
32378 ix86_gen_TWO52 (enum machine_mode mode)
32380 REAL_VALUE_TYPE TWO52r;
32383 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
32384 TWO52 = const_double_from_real_value (TWO52r, mode);
32385 TWO52 = force_reg (mode, TWO52);
  return TWO52;
32390 /* Expand SSE sequence for computing lround from OP1 storing
   into OP0.  */
32393 ix86_expand_lround (rtx op0, rtx op1)
32395 /* C code for the stuff we're doing below:
32396 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
	op0 = (long)tmp;
   */
32399 enum machine_mode mode = GET_MODE (op1);
32400 const struct real_format *fmt;
32401 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
32404 /* load nextafter (0.5, 0.0) */
32405 fmt = REAL_MODE_FORMAT (mode);
32406 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
32407 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
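  /* Illustrative values (an editorial note, not in the original sources):
     for DFmode fmt->p is 53, so half_minus_pred_half is 2**-54 and

	pred_half = 0.5 - 2**-54 = 0.49999999999999994...

     the largest double below 0.5.  Adding it instead of 0.5 keeps inputs
     just below 0.5 from being rounded up to 1.  */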
32409 /* adj = copysign (0.5, op1) */
32410 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
32411 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
32413 /* adj = op1 + adj */
32414 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
32416 /* op0 = (imode)adj */
32417 expand_fix (op0, adj, 0);
32420 /* Expand SSE2 sequence for computing lfloor or lceil from OP1 storing
   into OP0.  */
32423 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
32425 /* C code for the stuff we're doing below (for do_floor):
	xi = (long)op1;
32427 xi -= (double)xi > op1 ? 1 : 0;
	return xi;
   */
32430 enum machine_mode fmode = GET_MODE (op1);
32431 enum machine_mode imode = GET_MODE (op0);
32432 rtx ireg, freg, label, tmp;
32434 /* reg = (long)op1 */
32435 ireg = gen_reg_rtx (imode);
32436 expand_fix (ireg, op1, 0);
32438 /* freg = (double)reg */
32439 freg = gen_reg_rtx (fmode);
32440 expand_float (freg, ireg, 0);
32442 /* ireg = (freg > op1) ? ireg - 1 : ireg */
32443 label = ix86_expand_sse_compare_and_jump (UNLE,
32444 freg, op1, !do_floor);
32445 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
32446 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
32447 emit_move_insn (ireg, tmp);
32449 emit_label (label);
32450 LABEL_NUSES (label) = 1;
32452 emit_move_insn (op0, ireg);
32455 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
32456 result in OPERAND0. */
32458 ix86_expand_rint (rtx operand0, rtx operand1)
32460 /* C code for the stuff we're doing below:
32461 xa = fabs (operand1);
32462 if (!isless (xa, 2**52))
	  return operand1;
32464 xa = xa + 2**52 - 2**52;
32465 return copysign (xa, operand1);
   */
32467 enum machine_mode mode = GET_MODE (operand0);
32468 rtx res, xa, label, TWO52, mask;
32470 res = gen_reg_rtx (mode);
32471 emit_move_insn (res, operand1);
32473 /* xa = abs (operand1) */
32474 xa = ix86_expand_sse_fabs (res, &mask);
32476 /* if (!isless (xa, TWO52)) goto label; */
32477 TWO52 = ix86_gen_TWO52 (mode);
32478 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32480 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32481 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
32483 ix86_sse_copysign_to_positive (res, xa, res, mask);
32485 emit_label (label);
32486 LABEL_NUSES (label) = 1;
32488 emit_move_insn (operand0, res);
32491 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
   into OPERAND0.  A sequence that works without relying on DImode truncation
   via cvttsd2siq, which is only available on 64-bit targets.  */
32494 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
32496 /* C code for the stuff we expand below.
32497 double xa = fabs (x), x2;
32498 if (!isless (xa, TWO52))
          return x;
32500 xa = xa + TWO52 - TWO52;
32501 x2 = copysign (xa, x);
     Compensate.  Floor:
	if (x2 > x)
	  x2 -= 1;
     Compensate.  Ceil:
	if (x2 < x)
	  x2 += 1;
	return x2;
   */
32510 enum machine_mode mode = GET_MODE (operand0);
32511 rtx xa, TWO52, tmp, label, one, res, mask;
32513 TWO52 = ix86_gen_TWO52 (mode);
32515 /* Temporary for holding the result, initialized to the input
32516 operand to ease control flow. */
32517 res = gen_reg_rtx (mode);
32518 emit_move_insn (res, operand1);
32520 /* xa = abs (operand1) */
32521 xa = ix86_expand_sse_fabs (res, &mask);
32523 /* if (!isless (xa, TWO52)) goto label; */
32524 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32526 /* xa = xa + TWO52 - TWO52; */
32527 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32528 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
32530 /* xa = copysign (xa, operand1) */
32531 ix86_sse_copysign_to_positive (xa, xa, res, mask);
32533 /* generate 1.0 or -1.0 */
32534 one = force_reg (mode,
32535 const_double_from_real_value (do_floor
32536 ? dconst1 : dconstm1, mode));
32538 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
32539 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
32540 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32541 gen_rtx_AND (mode, one, tmp)));
32542 /* We always need to subtract here to preserve signed zero. */
32543 tmp = expand_simple_binop (mode, MINUS,
32544 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32545 emit_move_insn (res, tmp);
32547 emit_label (label);
32548 LABEL_NUSES (label) = 1;
32550 emit_move_insn (operand0, res);
32553 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
   into OPERAND0.  */
32556 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
32558 /* C code for the stuff we expand below.
32559 double xa = fabs (x), x2;
32560 if (!isless (xa, TWO52))
          return x;
32562 x2 = (double)(long)x;
     Compensate.  Floor:
	if (x2 > x)
	  x2 -= 1;
     Compensate.  Ceil:
	if (x2 < x)
	  x2 += 1;
32569 if (HONOR_SIGNED_ZEROS (mode))
32570 return copysign (x2, x);
	return x2;
   */
32573 enum machine_mode mode = GET_MODE (operand0);
32574 rtx xa, xi, TWO52, tmp, label, one, res, mask;
32576 TWO52 = ix86_gen_TWO52 (mode);
32578 /* Temporary for holding the result, initialized to the input
32579 operand to ease control flow. */
32580 res = gen_reg_rtx (mode);
32581 emit_move_insn (res, operand1);
32583 /* xa = abs (operand1) */
32584 xa = ix86_expand_sse_fabs (res, &mask);
32586 /* if (!isless (xa, TWO52)) goto label; */
32587 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32589 /* xa = (double)(long)x */
32590 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32591 expand_fix (xi, res, 0);
32592 expand_float (xa, xi, 0);
32595 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
32597 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
32598 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
32599 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32600 gen_rtx_AND (mode, one, tmp)));
32601 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
32602 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32603 emit_move_insn (res, tmp);
32605 if (HONOR_SIGNED_ZEROS (mode))
32606 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
32608 emit_label (label);
32609 LABEL_NUSES (label) = 1;
32611 emit_move_insn (operand0, res);
32614 /* Expand SSE sequence for computing round from OPERAND1 storing
32615    into OPERAND0.  A sequence that works without relying on DImode truncation
32616    via cvttsd2siq, which is only available on 64-bit targets.  */
32618 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
32620 /* C code for the stuff we expand below.
32621 double xa = fabs (x), xa2, x2;
32622 if (!isless (xa, TWO52))
          return x;
32624 Using the absolute value and copying back sign makes
32625 -0.0 -> -0.0 correct.
32626 xa2 = xa + TWO52 - TWO52;
     Compensate.
	dxa = xa2 - xa;
        if (dxa <= -0.5)
          xa2 += 1;
32631 else if (dxa > 0.5)
          xa2 -= 1;
32633 x2 = copysign (xa2, x);
        return x2;
   */
32636 enum machine_mode mode = GET_MODE (operand0);
32637 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
32639 TWO52 = ix86_gen_TWO52 (mode);
32641 /* Temporary for holding the result, initialized to the input
32642 operand to ease control flow. */
32643 res = gen_reg_rtx (mode);
32644 emit_move_insn (res, operand1);
32646 /* xa = abs (operand1) */
32647 xa = ix86_expand_sse_fabs (res, &mask);
32649 /* if (!isless (xa, TWO52)) goto label; */
32650 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32652 /* xa2 = xa + TWO52 - TWO52; */
32653 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32654 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
32656 /* dxa = xa2 - xa; */
32657 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
32659 /* generate 0.5, 1.0 and -0.5 */
32660 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
32661 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
32662 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
			       0, OPTAB_DIRECT);
32666 tmp = gen_reg_rtx (mode);
32667 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
32668 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
32669 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32670 gen_rtx_AND (mode, one, tmp)));
32671 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32672 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
32673 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
32674 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32675 gen_rtx_AND (mode, one, tmp)));
32676 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32678 /* res = copysign (xa2, operand1) */
32679 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
32681 emit_label (label);
32682 LABEL_NUSES (label) = 1;
32684 emit_move_insn (operand0, res);
32687 /* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */
32690 ix86_expand_trunc (rtx operand0, rtx operand1)
32692 /* C code for SSE variant we expand below.
32693 double xa = fabs (x), x2;
32694 if (!isless (xa, TWO52))
          return x;
32696 x2 = (double)(long)x;
32697 if (HONOR_SIGNED_ZEROS (mode))
32698 return copysign (x2, x);
	return x2;
   */
32701 enum machine_mode mode = GET_MODE (operand0);
32702 rtx xa, xi, TWO52, label, res, mask;
32704 TWO52 = ix86_gen_TWO52 (mode);
32706 /* Temporary for holding the result, initialized to the input
32707 operand to ease control flow. */
32708 res = gen_reg_rtx (mode);
32709 emit_move_insn (res, operand1);
32711 /* xa = abs (operand1) */
32712 xa = ix86_expand_sse_fabs (res, &mask);
32714 /* if (!isless (xa, TWO52)) goto label; */
32715 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32717 /* x = (double)(long)x */
32718 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32719 expand_fix (xi, res, 0);
32720 expand_float (res, xi, 0);
32722 if (HONOR_SIGNED_ZEROS (mode))
32723 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
32725 emit_label (label);
32726 LABEL_NUSES (label) = 1;
32728 emit_move_insn (operand0, res);
32731 /* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  A sequence that works without relying on DImode truncation
   via cvttsd2siq, which is only available on 64-bit targets.  */
32734 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
32736 enum machine_mode mode = GET_MODE (operand0);
32737 rtx xa, mask, TWO52, label, one, res, smask, tmp;
32739 /* C code for SSE variant we expand below.
32740 double xa = fabs (x), x2;
32741 if (!isless (xa, TWO52))
          return x;
32743 xa2 = xa + TWO52 - TWO52;
     Compensate:
	if (xa2 > xa)
	  xa2 -= 1.0;
32747 x2 = copysign (xa2, x);
	return x2;
   */
32751 TWO52 = ix86_gen_TWO52 (mode);
32753 /* Temporary for holding the result, initialized to the input
32754 operand to ease control flow. */
32755 res = gen_reg_rtx (mode);
32756 emit_move_insn (res, operand1);
32758 /* xa = abs (operand1) */
32759 xa = ix86_expand_sse_fabs (res, &smask);
32761 /* if (!isless (xa, TWO52)) goto label; */
32762 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32764 /* res = xa + TWO52 - TWO52; */
32765 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32766 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
32767 emit_move_insn (res, tmp);
32770 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
32772 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
32773 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
32774 emit_insn (gen_rtx_SET (VOIDmode, mask,
32775 gen_rtx_AND (mode, mask, one)));
32776 tmp = expand_simple_binop (mode, MINUS,
32777 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
32778 emit_move_insn (res, tmp);
32780 /* res = copysign (res, operand1) */
32781 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
32783 emit_label (label);
32784 LABEL_NUSES (label) = 1;
32786 emit_move_insn (operand0, res);
32789 /* Expand SSE sequence for computing round from OPERAND1 storing
   into OPERAND0.  */
32792 ix86_expand_round (rtx operand0, rtx operand1)
32794 /* C code for the stuff we're doing below:
32795 double xa = fabs (x);
32796 if (!isless (xa, TWO52))
          return x;
32798 xa = (double)(long)(xa + nextafter (0.5, 0.0));
32799 return copysign (xa, x);
   */
32801 enum machine_mode mode = GET_MODE (operand0);
32802 rtx res, TWO52, xa, label, xi, half, mask;
32803 const struct real_format *fmt;
32804 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
32806 /* Temporary for holding the result, initialized to the input
32807 operand to ease control flow. */
32808 res = gen_reg_rtx (mode);
32809 emit_move_insn (res, operand1);
32811 TWO52 = ix86_gen_TWO52 (mode);
32812 xa = ix86_expand_sse_fabs (res, &mask);
32813 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32815 /* load nextafter (0.5, 0.0) */
32816 fmt = REAL_MODE_FORMAT (mode);
32817 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
32818 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
32820 /* xa = xa + 0.5 */
32821 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
32822 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
32824 /* xa = (double)(int64_t)xa */
32825 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32826 expand_fix (xi, xa, 0);
32827 expand_float (xa, xi, 0);
32829 /* res = copysign (xa, operand1) */
32830 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
32832 emit_label (label);
32833 LABEL_NUSES (label) = 1;
32835 emit_move_insn (operand0, res);
32839 /* Table of valid machine attributes. */
32840 static const struct attribute_spec ix86_attribute_table[] =
32842 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
32843 affects_type_identity } */
32844 /* Stdcall attribute says callee is responsible for popping arguments
32845 if they are not variable. */
32846 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32848 /* Fastcall attribute says callee is responsible for popping arguments
32849 if they are not variable. */
32850 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32852 /* Thiscall attribute says callee is responsible for popping arguments
32853 if they are not variable. */
32854 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32856 /* Cdecl attribute says the callee is a normal C declaration */
32857 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32859 /* Regparm attribute specifies how many integer arguments are to be
32860 passed in registers. */
32861 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute,
32863 /* Sseregparm attribute says we are using x86_64 calling conventions
32864 for FP arguments. */
32865 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32867 /* force_align_arg_pointer says this function realigns the stack at entry. */
32868 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
32869 false, true, true, ix86_handle_cconv_attribute, false },
32870 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
32871 { "dllimport", 0, 0, false, false, false, handle_dll_attribute, false },
32872 { "dllexport", 0, 0, false, false, false, handle_dll_attribute, false },
32873 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute,
32876 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
32878 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
32880 #ifdef SUBTARGET_ATTRIBUTE_TABLE
32881 SUBTARGET_ATTRIBUTE_TABLE,
#endif
32883 /* ms_abi and sysv_abi calling convention function attributes. */
32884 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
32885 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
32886 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute,
32888 { "callee_pop_aggregate_return", 1, 1, false, true, true,
32889 ix86_handle_callee_pop_aggregate_return, true },
32891 { NULL, 0, 0, false, false, false, NULL, false }
};
32894 /* Implement targetm.vectorize.builtin_vectorization_cost. */
32896 ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
32897 tree vectype ATTRIBUTE_UNUSED,
32898 int misalign ATTRIBUTE_UNUSED)
32900 switch (type_of_cost)
    {
    case scalar_stmt:
32903 return ix86_cost->scalar_stmt_cost;

    case scalar_load:
32906 return ix86_cost->scalar_load_cost;

    case scalar_store:
32909 return ix86_cost->scalar_store_cost;

    case vector_stmt:
32912 return ix86_cost->vec_stmt_cost;

    case vector_load:
32915 return ix86_cost->vec_align_load_cost;

    case vector_store:
32918 return ix86_cost->vec_store_cost;
32920 case vec_to_scalar:
32921 return ix86_cost->vec_to_scalar_cost;
32923 case scalar_to_vec:
32924 return ix86_cost->scalar_to_vec_cost;
32926 case unaligned_load:
32927 case unaligned_store:
32928 return ix86_cost->vec_unalign_load_cost;
32930 case cond_branch_taken:
32931 return ix86_cost->cond_taken_branch_cost;
32933 case cond_branch_not_taken:
32934 return ix86_cost->cond_not_taken_branch_cost;
32940 gcc_unreachable ();
32945 /* Implement targetm.vectorize.builtin_vec_perm. */
32948 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
32950 tree itype = TREE_TYPE (vec_type);
32951 bool u = TYPE_UNSIGNED (itype);
32952 enum machine_mode vmode = TYPE_MODE (vec_type);
32953 enum ix86_builtins fcode;
32954 bool ok = TARGET_SSE2;
32960 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
32963 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
32965 itype = ix86_get_builtin_type (IX86_BT_DI);
32970 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
32974 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
32976 itype = ix86_get_builtin_type (IX86_BT_SI);
32980 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
32983 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
32986 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
32989 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
32999 *mask_type = itype;
33000 return ix86_builtins[(int) fcode];
33003 /* Return a vector mode with twice as many elements as VMODE. */
33004 /* ??? Consider moving this to a table generated by genmodes.c. */
33006 static enum machine_mode
33007 doublesize_vector_mode (enum machine_mode vmode)
33011 case V2SFmode: return V4SFmode;
33012 case V1DImode: return V2DImode;
33013 case V2SImode: return V4SImode;
33014 case V4HImode: return V8HImode;
33015 case V8QImode: return V16QImode;
33017 case V2DFmode: return V4DFmode;
33018 case V4SFmode: return V8SFmode;
33019 case V2DImode: return V4DImode;
33020 case V4SImode: return V8SImode;
33021 case V8HImode: return V16HImode;
33022 case V16QImode: return V32QImode;
33024 case V4DFmode: return V8DFmode;
33025 case V8SFmode: return V16SFmode;
33026 case V4DImode: return V8DImode;
33027 case V8SImode: return V16SImode;
33028 case V16HImode: return V32HImode;
33029 case V32QImode: return V64QImode;
33032 gcc_unreachable ();
33036 /* Construct (set target (vec_select op0 (parallel perm))) and
33037 return true if that's a valid instruction in the active ISA. */
33040 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
33042 rtx rperm[MAX_VECT_LEN], x;
33045 for (i = 0; i < nelt; ++i)
33046 rperm[i] = GEN_INT (perm[i]);
33048 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
33049 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
33050 x = gen_rtx_SET (VOIDmode, target, x);
  x = emit_insn (x);
33053 if (recog_memoized (x) < 0)
    {
      remove_insn (x);
      return false;
    }

  return true;
33061 /* Similar, but generate a vec_concat from op0 and op1 as well. */
33064 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
33065 const unsigned char *perm, unsigned nelt)
33067 enum machine_mode v2mode;
33070 v2mode = doublesize_vector_mode (GET_MODE (op0));
33071 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
33072 return expand_vselect (target, x, perm, nelt);
33075 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33076 in terms of blendp[sd] / pblendw / pblendvb. */
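/* For example (an illustrative note, not in the original sources): the
   V4SF permutation { 0, 5, 2, 7 } is a blend; elements 0 and 2 come from
   op0 and elements 1 and 3 (5 - 4 and 7 - 4) from op1, which yields the
   immediate mask 0b1010 for blendps.  */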
33079 expand_vec_perm_blend (struct expand_vec_perm_d *d)
33081 enum machine_mode vmode = d->vmode;
33082 unsigned i, mask, nelt = d->nelt;
33083 rtx target, op0, op1, x;
33085 if (!TARGET_SSE4_1 || d->op0 == d->op1)
    return false;
33087 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
    return false;
33090 /* This is a blend, not a permute. Elements must stay in their
33091 respective lanes. */
33092 for (i = 0; i < nelt; ++i)
33094 unsigned e = d->perm[i];
33095 if (!(e == i || e == i + nelt))
	return false;
33102 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
33103 decision should be extracted elsewhere, so that we only try that
33104 sequence once all budget==3 options have been tried. */
33106 /* For bytes, see if bytes move in pairs so we can use pblendw with
33107 an immediate argument, rather than pblendvb with a vector argument. */
33108 if (vmode == V16QImode)
33110 bool pblendw_ok = true;
33111 for (i = 0; i < 16 && pblendw_ok; i += 2)
33112 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
      if (!pblendw_ok)
33116 rtx rperm[16], vperm;
33118 for (i = 0; i < nelt; ++i)
33119 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
33121 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
33122 vperm = force_reg (V16QImode, vperm);
33124 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
	  return true;
33129 target = d->target;
33141 for (i = 0; i < nelt; ++i)
33142 mask |= (d->perm[i] >= nelt) << i;
33146 for (i = 0; i < 2; ++i)
33147 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
33151 for (i = 0; i < 4; ++i)
33152 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
33156 for (i = 0; i < 8; ++i)
33157 mask |= (d->perm[i * 2] >= 16) << i;
33161 target = gen_lowpart (vmode, target);
33162 op0 = gen_lowpart (vmode, op0);
33163 op1 = gen_lowpart (vmode, op1);
33167 gcc_unreachable ();
33170 /* This matches five different patterns with the different modes. */
33171 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
33172 x = gen_rtx_SET (VOIDmode, target, x);
  emit_insn (x);
  return true;
33178 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33179 in terms of the variable form of vpermilps.
33181 Note that we will have already failed the immediate input vpermilps,
33182 which requires that the high and low part shuffle be identical; the
33183 variable form doesn't require that. */
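/* For example (an illustrative note, not in the original sources): the
   variable vpermilps selects within each 128-bit lane, so the V8SF
   permutation { 3, 2, 1, 0, 7, 6, 5, 4 } reverses the floats of each lane
   and is encoded as the per-lane control { 3, 2, 1, 0, 3, 2, 1, 0 }.  */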
33186 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
33188 rtx rperm[8], vperm;
33191 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
    return false;
33194 /* We can only permute within the 128-bit lane. */
33195 for (i = 0; i < 8; ++i)
33197 unsigned e = d->perm[i];
33198 if (i < 4 ? e >= 4 : e < 4)
	return false;
33205 for (i = 0; i < 8; ++i)
33207 unsigned e = d->perm[i];
33209 /* Within each 128-bit lane, the elements of op0 are numbered
33210 from 0 and the elements of op1 are numbered from 4. */
33216 rperm[i] = GEN_INT (e);
33219 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
33220 vperm = force_reg (V8SImode, vperm);
33221 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
  return true;
33226 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33227 in terms of pshufb or vpperm. */
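/* For example (an illustrative note, not in the original sources): for a
   V4SI permutation eltsz is 4, so selecting dword e expands to the four
   byte selectors e*4, e*4 + 1, e*4 + 2 and e*4 + 3 in the pshufb control
   vector built below.  */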
33230 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
33232 unsigned i, nelt, eltsz;
33233 rtx rperm[16], vperm, target, op0, op1;
33235 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
    return false;
33237 if (GET_MODE_SIZE (d->vmode) != 16)
    return false;

  if (d->testing_p)
    return true;

  nelt = d->nelt;
33244 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
33246 for (i = 0; i < nelt; ++i)
33248 unsigned j, e = d->perm[i];
33249 for (j = 0; j < eltsz; ++j)
33250 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
33253 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
33254 vperm = force_reg (V16QImode, vperm);
33256 target = gen_lowpart (V16QImode, d->target);
33257 op0 = gen_lowpart (V16QImode, d->op0);
33258 if (d->op0 == d->op1)
33259 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
33262 op1 = gen_lowpart (V16QImode, d->op1);
33263 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
    }

  return true;
33269 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
33270 in a single instruction. */
33273 expand_vec_perm_1 (struct expand_vec_perm_d *d)
33275 unsigned i, nelt = d->nelt;
33276 unsigned char perm2[MAX_VECT_LEN];
33278 /* Check plain VEC_SELECT first, because AVX has instructions that could
33279 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
33280 input where SEL+CONCAT may not. */
33281 if (d->op0 == d->op1)
33283 int mask = nelt - 1;
33285 for (i = 0; i < nelt; i++)
33286 perm2[i] = d->perm[i] & mask;
33288 if (expand_vselect (d->target, d->op0, perm2, nelt))
	return true;
33291 /* There are plenty of patterns in sse.md that are written for
33292 SEL+CONCAT and are not replicated for a single op. Perhaps
33293 that should be changed, to avoid the nastiness here. */
33295 /* Recognize interleave style patterns, which means incrementing
33296 every other permutation operand. */
33297 for (i = 0; i < nelt; i += 2)
33299 perm2[i] = d->perm[i] & mask;
33300 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
33302 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
	return true;
33305 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
33308 for (i = 0; i < nelt; i += 4)
33310 perm2[i + 0] = d->perm[i + 0] & mask;
33311 perm2[i + 1] = d->perm[i + 1] & mask;
33312 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
33313 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
33316 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
	return true;
33321 /* Finally, try the fully general two operand permute. */
33322 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
    return true;
33325 /* Recognize interleave style patterns with reversed operands. */
33326 if (d->op0 != d->op1)
33328 for (i = 0; i < nelt; ++i)
33330 unsigned e = d->perm[i];
	  if (e >= nelt)
	    e -= nelt;
	  else
	    e += nelt;
	  perm2[i] = e;
33338 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
	return true;
33342 /* Try the SSE4.1 blend variable merge instructions. */
33343 if (expand_vec_perm_blend (d))
    return true;
33346 /* Try one of the AVX vpermil variable permutations. */
33347 if (expand_vec_perm_vpermil (d))
    return true;
33350 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
33351 if (expand_vec_perm_pshufb (d))
    return true;

  return false;
33357 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33358 in terms of a pair of pshuflw + pshufhw instructions. */
33361 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
33363 unsigned char perm2[MAX_VECT_LEN];
33367 if (d->vmode != V8HImode || d->op0 != d->op1)
    return false;
33370 /* The two permutations only operate in 64-bit lanes. */
33371 for (i = 0; i < 4; ++i)
33372 if (d->perm[i] >= 4)
      return false;
33374 for (i = 4; i < 8; ++i)
33375 if (d->perm[i] < 4)
      return false;
33381 /* Emit the pshuflw. */
33382 memcpy (perm2, d->perm, 4);
33383 for (i = 4; i < 8; ++i)
    perm2[i] = i;
33385 ok = expand_vselect (d->target, d->op0, perm2, 8);
  gcc_assert (ok);
33388 /* Emit the pshufhw. */
33389 memcpy (perm2 + 4, d->perm + 4, 4);
33390 for (i = 0; i < 4; ++i)
    perm2[i] = i;
33392 ok = expand_vselect (d->target, d->target, perm2, 8);
  gcc_assert (ok);

  return true;
33398 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
33399 the permutation using the SSSE3 palignr instruction. This succeeds
33400 when all of the elements in PERM fit within one vector and we merely
33401 need to shift them down so that a single vector permutation has a
33402 chance to succeed. */
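/* For example (an illustrative note, not in the original sources): the
   V8HI permutation { 2 3 4 5 6 7 8 9 } has min == 2, so a palignr by
   2 * 16 bits funnels elements 2..9 of the op1:op0 pair into a single
   vector, after which the residual permutation is the identity.  */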
33405 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
33407 unsigned i, nelt = d->nelt;
33412 /* Even with AVX, palignr only operates on 128-bit vectors. */
33413 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
    return false;
33416 min = nelt, max = 0;
33417 for (i = 0; i < nelt; ++i)
33419 unsigned e = d->perm[i];
      if (e < min)
	min = e;
      if (e > max)
	max = e;
33425 if (min == 0 || max - min >= nelt)
    return false;
33428 /* Given that we have SSSE3, we know we'll be able to implement the
33429 single operand permutation after the palignr with pshufb. */
33433 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
33434 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
33435 gen_lowpart (TImode, d->op1),
33436 gen_lowpart (TImode, d->op0), shift));
33438 d->op0 = d->op1 = d->target;
33441 for (i = 0; i < nelt; ++i)
33443 unsigned e = d->perm[i] - min;
      if (e != i)
	in_order = false;
      d->perm[i] = e;
33449 /* Test for the degenerate case where the alignment by itself
33450 produces the desired permutation. */
  if (in_order)
    return true;
33454 ok = expand_vec_perm_1 (d);
  gcc_assert (ok);

  return ok;
33460 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
33461 a two vector permutation into a single vector permutation by using
33462 an interleave operation to merge the vectors. */
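/* For example (an illustrative note, not in the original sources): if
   every d->perm element selects from the low half of op0 or the low half
   of op1 (the h1 | h3 case below), an interleave-low of the two operands
   brings all the needed elements into one vector, and the final
   permutation merely reorders that vector.  */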
33465 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
33467 struct expand_vec_perm_d dremap, dfinal;
33468 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
33469 unsigned contents, h1, h2, h3, h4;
33470 unsigned char remap[2 * MAX_VECT_LEN];
33474 if (d->op0 == d->op1)
    return false;
33477 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
33478 lanes. We can use similar techniques with the vperm2f128 instruction,
33479 but it requires slightly different logic. */
33480 if (GET_MODE_SIZE (d->vmode) != 16)
    return false;
33483 /* Examine from whence the elements come. */
33485 for (i = 0; i < nelt; ++i)
33486 contents |= 1u << d->perm[i];
33488 /* Split the two input vectors into 4 halves. */
33489 h1 = (1u << nelt2) - 1;
33494 memset (remap, 0xff, sizeof (remap));
33497   /* If all elements come from the low halves, use interleave low; similarly
33498      for interleave high.  If the elements are from mis-matched halves, we
33499      can use shufps for V4SF/V4SI or do a DImode shuffle.  */
33500 if ((contents & (h1 | h3)) == contents)
33502 for (i = 0; i < nelt2; ++i)
33505 remap[i + nelt] = i * 2 + 1;
33506 dremap.perm[i * 2] = i;
33507 dremap.perm[i * 2 + 1] = i + nelt;
33510 else if ((contents & (h2 | h4)) == contents)
33512 for (i = 0; i < nelt2; ++i)
33514 remap[i + nelt2] = i * 2;
33515 remap[i + nelt + nelt2] = i * 2 + 1;
33516 dremap.perm[i * 2] = i + nelt2;
33517 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
33520 else if ((contents & (h1 | h4)) == contents)
33522 for (i = 0; i < nelt2; ++i)
33525 remap[i + nelt + nelt2] = i + nelt2;
33526 dremap.perm[i] = i;
33527 dremap.perm[i + nelt2] = i + nelt + nelt2;
33531 dremap.vmode = V2DImode;
33533 dremap.perm[0] = 0;
33534 dremap.perm[1] = 3;
33537 else if ((contents & (h2 | h3)) == contents)
33539 for (i = 0; i < nelt2; ++i)
33541 remap[i + nelt2] = i;
33542 remap[i + nelt] = i + nelt2;
33543 dremap.perm[i] = i + nelt2;
33544 dremap.perm[i + nelt2] = i + nelt;
33548 dremap.vmode = V2DImode;
33550 dremap.perm[0] = 1;
33551 dremap.perm[1] = 2;
33557 /* Use the remapping array set up above to move the elements from their
33558 swizzled locations into their final destinations. */
33560 for (i = 0; i < nelt; ++i)
33562 unsigned e = remap[d->perm[i]];
33563 gcc_assert (e < nelt);
33564 dfinal.perm[i] = e;
33566 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
33567 dfinal.op1 = dfinal.op0;
33568 dremap.target = dfinal.op0;
33570 /* Test if the final remap can be done with a single insn. For V4SFmode or
33571 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
  start_sequence ();
33573 ok = expand_vec_perm_1 (&dfinal);
33574 seq = get_insns ();
  end_sequence ();

  if (!ok)
    return false;
33580 if (dremap.vmode != dfinal.vmode)
33582 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
33583 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
33584 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
33587 ok = expand_vec_perm_1 (&dremap);
  gcc_assert (ok);

  emit_insn (seq);
  return true;
33594 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
33595 permutation with two pshufb insns and an ior. We should have already
33596 failed all two instruction sequences. */
33599 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
33601 rtx rperm[2][16], vperm, l, h, op, m128;
33602 unsigned int i, nelt, eltsz;
33604 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
    return false;
33606 gcc_assert (d->op0 != d->op1);

  nelt = d->nelt;
33609 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
33611 /* Generate two permutation masks. If the required element is within
33612 the given vector it is shuffled into the proper lane. If the required
33613 element is in the other vector, force a zero into the lane by setting
33614 bit 7 in the permutation mask. */
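/* For example (an illustrative note, not in the original sources): with
   V16QI operands, if d->perm[0] is 18, byte 0 must come from byte 2 of
   op1, so rperm[1][0] selects byte 2 while rperm[0][0] is -128 (bit 7
   set); the pshufb of op0 then writes a zero into byte 0 and the final
   ior merges the two half results.  */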
33615 m128 = GEN_INT (-128);
33616 for (i = 0; i < nelt; ++i)
33618 unsigned j, e = d->perm[i];
33619 unsigned which = (e >= nelt);
      if (which)
	e -= nelt;
33623 for (j = 0; j < eltsz; ++j)
33625 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
33626 rperm[1-which][i*eltsz + j] = m128;
33630 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
33631 vperm = force_reg (V16QImode, vperm);
33633 l = gen_reg_rtx (V16QImode);
33634 op = gen_lowpart (V16QImode, d->op0);
33635 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
33637 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
33638 vperm = force_reg (V16QImode, vperm);
33640 h = gen_reg_rtx (V16QImode);
33641 op = gen_lowpart (V16QImode, d->op1);
33642 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
33644 op = gen_lowpart (V16QImode, d->target);
33645 emit_insn (gen_iorv16qi3 (op, l, h));
  return true;
33650 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
33651 and extract-odd permutations. */
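/* For example (an illustrative note, not in the original sources): for
   V4SI and odd == 0 this computes target = { op0[0], op0[2], op1[0],
   op1[2] }, i.e. the permutation { 0, 2, 4, 6 } over the op0:op1 pair.  */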
33654 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
  rtx t1, t2, t3;

  switch (d->vmode)
    {
    case V4DFmode:
33661 t1 = gen_reg_rtx (V4DFmode);
33662 t2 = gen_reg_rtx (V4DFmode);
33664 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
33665 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
33666 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
33668 /* Now an unpck[lh]pd will produce the result required. */
      if (odd)
33670 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
      else
33672 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
      emit_insn (t3);
      break;

    case V8SFmode:
      {
33678 int mask = odd ? 0xdd : 0x88;
33680 t1 = gen_reg_rtx (V8SFmode);
33681 t2 = gen_reg_rtx (V8SFmode);
33682 t3 = gen_reg_rtx (V8SFmode);
33684 /* Shuffle within the 128-bit lanes to produce:
33685 { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }. */
33686 emit_insn (gen_avx_shufps256 (t1, d->op0, d->op1,
33689 /* Shuffle the lanes around to produce:
33690 { 4 6 c e 0 2 8 a } and { 5 7 d f 1 3 9 b }. */
33691 emit_insn (gen_avx_vperm2f128v8sf3 (t2, t1, t1,
33694 /* Shuffle within the 128-bit lanes to produce:
33695 { 0 2 4 6 4 6 0 2 } | { 1 3 5 7 5 7 1 3 }. */
33696 emit_insn (gen_avx_shufps256 (t3, t1, t2, GEN_INT (0x44)));
33698 /* Shuffle within the 128-bit lanes to produce:
33699 { 8 a c e c e 8 a } | { 9 b d f d f 9 b }. */
33700 emit_insn (gen_avx_shufps256 (t2, t1, t2, GEN_INT (0xee)));
33702 /* Shuffle the lanes around to produce:
33703 { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }. */
33704 emit_insn (gen_avx_vperm2f128v8sf3 (d->target, t3, t2,
33713 /* These are always directly implementable by expand_vec_perm_1. */
33714 gcc_unreachable ();

    case V8HImode:
      if (TARGET_SSSE3)
33718 return expand_vec_perm_pshufb2 (d);
33721 /* We need 2*log2(N)-1 operations to achieve odd/even
33722 with interleave. */
33723 t1 = gen_reg_rtx (V8HImode);
33724 t2 = gen_reg_rtx (V8HImode);
33725 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
33726 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
33727 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
33728 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
	  if (odd)
33730 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
	  else
33732 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
	  emit_insn (t3);
	  break;

    case V16QImode:
      if (TARGET_SSSE3)
33739 return expand_vec_perm_pshufb2 (d);
33742 t1 = gen_reg_rtx (V16QImode);
33743 t2 = gen_reg_rtx (V16QImode);
33744 t3 = gen_reg_rtx (V16QImode);
33745 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
33746 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
33747 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
33748 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
33749 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
33750 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
	  if (odd)
33752 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
	  else
33754 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
	  emit_insn (t3);
	  break;

    default:
33760 gcc_unreachable ();
    }

  return true;
33766 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33767 extract-even and extract-odd permutations. */
33770 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
33772 unsigned i, odd, nelt = d->nelt;
  odd = d->perm[0];
33775 if (odd != 0 && odd != 1)
    return false;
33778 for (i = 1; i < nelt; ++i)
33779 if (d->perm[i] != 2 * i + odd)
      return false;
33782 return expand_vec_perm_even_odd_1 (d, odd);
33785 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
33786 permutations. We assume that expand_vec_perm_1 has already failed. */
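/* For example (an illustrative note, not in the original sources):
   broadcasting element 3 of a V16QI vector interleaves the byte with
   itself repeatedly (V16QI -> V8HI -> V4SI below) and then finishes with
   a pshufd that replicates the dword now containing the byte.  */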
33789 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
33791 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
33792 enum machine_mode vmode = d->vmode;
33793 unsigned char perm2[4];
  rtx op0 = d->op0;
  bool ok;

  switch (vmode)
    {
    case V4DFmode:
    case V8SFmode:
33801 /* These are special-cased in sse.md so that we can optionally
33802 use the vbroadcast instruction. They expand to two insns
33803 if the input happens to be in a register. */
33804 gcc_unreachable ();

    case V2DFmode:
    case V2DImode:
    case V4SFmode:
    case V4SImode:
33810 /* These are always implementable using standard shuffle patterns. */
33811 gcc_unreachable ();

    case V8HImode:
    case V16QImode:
33815 /* These can be implemented via interleave. We save one insn by
33816 stopping once we have promoted to V4SImode and then use pshufd. */
33819 optab otab = vec_interleave_low_optab;

	  if (elt >= nelt2)
	    {
33823 otab = vec_interleave_high_optab;
	      elt -= nelt2;
	    }
	  nelt2 /= 2;
33828 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
33829 vmode = get_mode_wider_vector (vmode);
33830 op0 = gen_lowpart (vmode, op0);
33832 while (vmode != V4SImode);
33834 memset (perm2, elt, 4);
33835 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
      gcc_assert (ok);
      return true;

    default:
33840 gcc_unreachable ();
33844 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33845 broadcast permutations. */
33848 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
33850 unsigned i, elt, nelt = d->nelt;
33852 if (d->op0 != d->op1)
    return false;

  elt = d->perm[0];
33856 for (i = 1; i < nelt; ++i)
33857 if (d->perm[i] != elt)
      return false;
33860 return expand_vec_perm_broadcast_1 (d);
33863 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
33864 With all of the interface bits taken care of, perform the expansion
33865 in D and return true on success. */
33868 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
33870 /* Try a single instruction expansion. */
33871 if (expand_vec_perm_1 (d))
    return true;
33874 /* Try sequences of two instructions. */
33876 if (expand_vec_perm_pshuflw_pshufhw (d))
    return true;
33879 if (expand_vec_perm_palignr (d))
    return true;
33882 if (expand_vec_perm_interleave2 (d))
    return true;
33885 if (expand_vec_perm_broadcast (d))
    return true;
33888 /* Try sequences of three instructions. */
33890 if (expand_vec_perm_pshufb2 (d))
    return true;
33893 /* ??? Look for narrow permutations whose element orderings would
33894 allow the promotion to a wider mode. */
33896 /* ??? Look for sequences of interleave or a wider permute that place
33897 the data into the correct lanes for a half-vector shuffle like
33898 pshuf[lh]w or vpermilps. */
33900 /* ??? Look for sequences of interleave that produce the desired results.
33901 The combinatorics of punpck[lh] get pretty ugly... */
33903 if (expand_vec_perm_even_odd (d))
    return true;

  return false;
33909 /* Extract the values from the vector CST into the permutation array in D.
33910 Return 0 on error, 1 if all values from the permutation come from the
33911 first vector, 2 if all values from the second vector, and 3 otherwise. */
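/* For example (an illustrative note, not in the original sources): for a
   V4SI selector { 4, 5, 6, 7 } every element comes from the second
   vector, so the return value is 2 and the fold below rewrites the
   selector to { 0, 1, 2, 3 }.  */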
33914 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
33916 tree list = TREE_VECTOR_CST_ELTS (cst);
33917 unsigned i, nelt = d->nelt;
33920 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
33922 unsigned HOST_WIDE_INT e;
33924 if (!host_integerp (TREE_VALUE (list), 1))
	return 0;
33926 e = tree_low_cst (TREE_VALUE (list), 1);
      if (e >= 2 * nelt)
	return 0;
33930 ret |= (e < nelt ? 1 : 2);
      d->perm[i] = e;
33933 gcc_assert (list == NULL);
33935 /* For all elements from second vector, fold the elements to first. */
  if (ret == 2)
33937 for (i = 0; i < nelt; ++i)
33938 d->perm[i] -= nelt;

  return ret;
33944 ix86_expand_vec_perm_builtin (tree exp)
33946 struct expand_vec_perm_d d;
33947 tree arg0, arg1, arg2;
33949 arg0 = CALL_EXPR_ARG (exp, 0);
33950 arg1 = CALL_EXPR_ARG (exp, 1);
33951 arg2 = CALL_EXPR_ARG (exp, 2);
33953 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
33954 d.nelt = GET_MODE_NUNITS (d.vmode);
33955 d.testing_p = false;
33956 gcc_assert (VECTOR_MODE_P (d.vmode));
33958 if (TREE_CODE (arg2) != VECTOR_CST)
33960 error_at (EXPR_LOCATION (exp),
33961 "vector permutation requires vector constant");
33965 switch (extract_vec_perm_cst (&d, arg2))
33971 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
33975 if (!operand_equal_p (arg0, arg1, 0))
33977 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
33978 d.op0 = force_reg (d.vmode, d.op0);
33979 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
33980 d.op1 = force_reg (d.vmode, d.op1);
33984 /* The elements of PERM do not suggest that only the first operand
33985 is used, but both operands are identical. Allow easier matching
33986 of the permutation by folding the permutation into the single
	 input vector.  */
33989 unsigned i, nelt = d.nelt;
33990 for (i = 0; i < nelt; ++i)
33991 if (d.perm[i] >= nelt)
33997 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
33998 d.op0 = force_reg (d.vmode, d.op0);
34003 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
34004 d.op0 = force_reg (d.vmode, d.op0);
34009 d.target = gen_reg_rtx (d.vmode);
34010 if (ix86_expand_vec_perm_builtin_1 (&d))
    return d.target;
34013   /* For compiler generated permutations, we should never get here, because
34014      the compiler should also be checking the ok hook.  But since this is a
34015      builtin the user has access to, don't abort.  */
34019 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
34022 sorry ("vector permutation (%d %d %d %d)",
34023 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
34026 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
34027 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
34028 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
34031 sorry ("vector permutation "
34032 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
34033 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
34034 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
34035 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
34036 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
34039 gcc_unreachable ();
34042 return CONST0_RTX (d.vmode);
34045 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
34048 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
34050 struct expand_vec_perm_d d;
34054 d.vmode = TYPE_MODE (vec_type);
34055 d.nelt = GET_MODE_NUNITS (d.vmode);
34056 d.testing_p = true;
34058 /* Given sufficient ISA support we can just return true here
34059 for selected vector modes. */
34060 if (GET_MODE_SIZE (d.vmode) == 16)
34062 /* All implementable with a single vpperm insn. */
      if (TARGET_XOP)
	return true;
34065 /* All implementable with 2 pshufb + 1 ior. */
      if (TARGET_SSSE3)
	return true;
34068 /* All implementable with shufpd or unpck[lh]pd. */
      if (d.nelt == 2)
	return true;
    }
34073 vec_mask = extract_vec_perm_cst (&d, mask);
34075   /* This hook cannot be called in response to something that the
34076      user does (unlike the builtin expander), so we shouldn't ever see
34077      an error generated from the extract.  */
34078 gcc_assert (vec_mask > 0 && vec_mask <= 3);
34079 one_vec = (vec_mask != 3);
34081 /* Implementable with shufps or pshufd. */
34082 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
34085 /* Otherwise we have to go through the motions and see if we can
34086 figure out how to generate the requested permutation. */
34087 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
34088 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
  if (!one_vec)
34090 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);

  start_sequence ();
34093 ret = ix86_expand_vec_perm_builtin_1 (&d);
  end_sequence ();

  return ret;
34100 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
34102 struct expand_vec_perm_d d;
  unsigned i, nelt;

  d.target = targ;
  d.op0 = op0;
  d.op1 = op1;
34108 d.vmode = GET_MODE (targ);
34109 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
34110 d.testing_p = false;
34112 for (i = 0; i < nelt; ++i)
34113 d.perm[i] = i * 2 + odd;
34115 /* We'll either be able to implement the permutation directly... */
34116 if (expand_vec_perm_1 (&d))
    return;
34119 /* ... or we use the special-case patterns. */
34120 expand_vec_perm_even_odd_1 (&d, odd);
34123 /* Expand an insert into a vector register through pinsr insn.
34124 Return true if successful. */
34127 ix86_expand_pinsr (rtx *operands)
34129 rtx dst = operands[0];
34130 rtx src = operands[3];
34132 unsigned int size = INTVAL (operands[1]);
34133 unsigned int pos = INTVAL (operands[2]);
34135 if (GET_CODE (dst) == SUBREG)
34137 pos += SUBREG_BYTE (dst) * BITS_PER_UNIT;
34138 dst = SUBREG_REG (dst);
34141 if (GET_CODE (src) == SUBREG)
34142 src = SUBREG_REG (src);
34144 switch (GET_MODE (dst))
34151 enum machine_mode srcmode, dstmode;
34152 rtx (*pinsr)(rtx, rtx, rtx, rtx);
34154 srcmode = mode_for_size (size, MODE_INT, 0);

	switch (srcmode)
	  {
	  case QImode:
34159 if (!TARGET_SSE4_1)
	      return false;
34161 dstmode = V16QImode;
34162 pinsr = gen_sse4_1_pinsrb;
	    break;

	  case HImode:
34168 dstmode = V8HImode;
34169 pinsr = gen_sse2_pinsrw;
	    break;

	  case SImode:
34173 if (!TARGET_SSE4_1)
34175 dstmode = V4SImode;
34176 pinsr = gen_sse4_1_pinsrd;
	    break;

	  case DImode:
34180 gcc_assert (TARGET_64BIT);
34181 if (!TARGET_SSE4_1)
	      return false;
34183 dstmode = V2DImode;
34184 pinsr = gen_sse4_1_pinsrq;
	    break;

	  default:
	    return false;
	  }
34191 dst = gen_lowpart (dstmode, dst);
34192 src = gen_lowpart (srcmode, src);
34196 emit_insn (pinsr (dst, dst, src, GEN_INT (1 << pos)));
	return true;
34205 /* This function returns the calling-ABI-specific va_list type node
34206    for FNDECL.  */
34209 ix86_fn_abi_va_list (tree fndecl)
  if (!TARGET_64BIT)
34212 return va_list_type_node;
34213 gcc_assert (fndecl != NULL_TREE);
34215 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
34216 return ms_va_list_type_node;
34218 return sysv_va_list_type_node;
34221 /* Returns the canonical va_list type specified by TYPE. If there
34222    is no valid TYPE provided, it returns NULL_TREE.  */
34225 ix86_canonical_va_list_type (tree type)
34229 /* Resolve references and pointers to va_list type. */
34230 if (TREE_CODE (type) == MEM_REF)
34231 type = TREE_TYPE (type);
34232 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
34233 type = TREE_TYPE (type);
34234 else if (POINTER_TYPE_P (type) && TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
34235 type = TREE_TYPE (type);
34237 if (TARGET_64BIT && va_list_type_node != NULL_TREE)
34239 wtype = va_list_type_node;
34240 gcc_assert (wtype != NULL_TREE);
      htype = type;
34242 if (TREE_CODE (wtype) == ARRAY_TYPE)
34244 /* If va_list is an array type, the argument may have decayed
34245 to a pointer type, e.g. by being passed to another function.
34246 In that case, unwrap both types so that we can compare the
34247 underlying records. */
34248 if (TREE_CODE (htype) == ARRAY_TYPE
34249 || POINTER_TYPE_P (htype))
34251 wtype = TREE_TYPE (wtype);
34252 htype = TREE_TYPE (htype);
34255 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34256 return va_list_type_node;
34257 wtype = sysv_va_list_type_node;
34258 gcc_assert (wtype != NULL_TREE);
      htype = type;
34260 if (TREE_CODE (wtype) == ARRAY_TYPE)
34262 /* If va_list is an array type, the argument may have decayed
34263 to a pointer type, e.g. by being passed to another function.
34264 In that case, unwrap both types so that we can compare the
34265 underlying records. */
34266 if (TREE_CODE (htype) == ARRAY_TYPE
34267 || POINTER_TYPE_P (htype))
34269 wtype = TREE_TYPE (wtype);
34270 htype = TREE_TYPE (htype);
34273 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34274 return sysv_va_list_type_node;
34275 wtype = ms_va_list_type_node;
34276 gcc_assert (wtype != NULL_TREE);
      htype = type;
34278 if (TREE_CODE (wtype) == ARRAY_TYPE)
34280 /* If va_list is an array type, the argument may have decayed
34281 to a pointer type, e.g. by being passed to another function.
34282 In that case, unwrap both types so that we can compare the
34283 underlying records. */
34284 if (TREE_CODE (htype) == ARRAY_TYPE
34285 || POINTER_TYPE_P (htype))
34287 wtype = TREE_TYPE (wtype);
34288 htype = TREE_TYPE (htype);
34291 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
34292 return ms_va_list_type_node;
34295 return std_canonical_va_list_type (type);
34298 /* Iterate through the target-specific builtin types for va_list.
34299 IDX denotes the iterator, *PTREE is set to the result type of
34300 the va_list builtin, and *PNAME to its internal type.
34301 Returns zero if there is no element for this index, otherwise
34302 IDX should be increased upon the next call.
34303 Note, do not iterate a base builtin's name like __builtin_va_list.
34304 Used from c_common_nodes_and_builtins. */
34307 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
34317 *ptree = ms_va_list_type_node;
34318 *pname = "__builtin_ms_va_list";
34322 *ptree = sysv_va_list_type_node;
34323 *pname = "__builtin_sysv_va_list";
34331 #undef TARGET_SCHED_DISPATCH
34332 #define TARGET_SCHED_DISPATCH has_dispatch
34333 #undef TARGET_SCHED_DISPATCH_DO
34334 #define TARGET_SCHED_DISPATCH_DO do_dispatch
34336 /* The size of the dispatch window is the total number of bytes of
34337 object code allowed in a window. */
34338 #define DISPATCH_WINDOW_SIZE 16
34340 /* Number of dispatch windows considered for scheduling. */
34341 #define MAX_DISPATCH_WINDOWS 3
34343 /* Maximum number of instructions in a window. */
34346 /* Maximum number of immediate operands in a window. */
34349 /* Maximum number of immediate bits allowed in a window. */
34350 #define MAX_IMM_SIZE 128
34352 /* Maximum number of 32 bit immediates allowed in a window. */
34353 #define MAX_IMM_32 4
34355 /* Maximum number of 64 bit immediates allowed in a window. */
34356 #define MAX_IMM_64 2
34358 /* Maximum total of loads or prefetches allowed in a window. */
34361 /* Maximum total of stores allowed in a window. */
34362 #define MAX_STORE 1
34368 /* Dispatch groups.  Instructions that affect the mix in a dispatch window.  */
34369 enum dispatch_group {
  disp_no_group = 0,
  disp_load,
  disp_store,
  disp_load_store,
  disp_prefetch,
  disp_imm,
  disp_imm_32,
  disp_imm_64,
  disp_branch,
  disp_cmp,
  disp_jcc,
  disp_last
};
34384 /* Number of allowable groups in a dispatch window.  It is an array
34385    indexed by dispatch_group enum.  100 is used as a big number,
34386    because the number of these kinds of operations does not have any
34387    effect in a dispatch window, but we need them for other reasons in
   the table.  */
34389 static unsigned int num_allowable_groups[disp_last] = {
34390 0, 2, 1, 1, 2, 4, 4, 2, 1, BIG, BIG
34393 char group_name[disp_last + 1][16] = {
34394 "disp_no_group", "disp_load", "disp_store", "disp_load_store",
34395 "disp_prefetch", "disp_imm", "disp_imm_32", "disp_imm_64",
34396 "disp_branch", "disp_cmp", "disp_jcc", "disp_last"
34399 /* Instruction path. */
34402 path_single, /* Single micro op. */
34403 path_double, /* Double micro op. */
34404   path_multi, /* Instructions with more than 2 micro ops.  */
};
34408 /* sched_insn_info defines a window to the instructions scheduled in
34409 the basic block. It contains a pointer to the insn_info table and
34410 the instruction scheduled.
34412 Windows are allocated for each basic block and are linked
   together.  */
34414 typedef struct sched_insn_info_s {
  rtx insn;
34416 enum dispatch_group group;
34417 enum insn_path path;
  int byte_len;
  int imm_bytes;
} sched_insn_info;
34422 /* Linked list of dispatch windows. This is a two way list of
34423 dispatch windows of a basic block. It contains information about
34424 the number of uops in the window and the total number of
34425 instructions and of bytes in the object code for this dispatch
   window.  */
34427 typedef struct dispatch_windows_s {
34428 int num_insn; /* Number of insn in the window. */
34429 int num_uops; /* Number of uops in the window. */
34430 int window_size; /* Number of bytes in the window. */
34431   int window_num;    /* Window number, 0 or 1.  */
34432 int num_imm; /* Number of immediates in an insn. */
34433 int num_imm_32; /* Number of 32 bit immediates in an insn. */
34434 int num_imm_64; /* Number of 64 bit immediates in an insn. */
34435 int imm_size; /* Total immediates in the window. */
34436 int num_loads; /* Total memory loads in the window. */
34437 int num_stores; /* Total memory stores in the window. */
34438 int violation; /* Violation exists in window. */
34439 sched_insn_info *window; /* Pointer to the window. */
34440 struct dispatch_windows_s *next;
34441 struct dispatch_windows_s *prev;
34442 } dispatch_windows;
34444 /* Immediate values used in an insn.  */
34445 typedef struct imm_info_s
{
  int imm;
  int imm32;
  int imm64;
} imm_info;
34452 static dispatch_windows *dispatch_window_list;
34453 static dispatch_windows *dispatch_window_list1;
34455 /* Get dispatch group of insn. */
34457 static enum dispatch_group
34458 get_mem_group (rtx insn)
34460 enum attr_memory memory;
34462 if (INSN_CODE (insn) < 0)
34463 return disp_no_group;
34464 memory = get_attr_memory (insn);
34465 if (memory == MEMORY_STORE)
    return disp_store;
34468 if (memory == MEMORY_LOAD)
    return disp_load;
34471 if (memory == MEMORY_BOTH)
34472 return disp_load_store;
34474 return disp_no_group;
34477 /* Return true if insn is a compare instruction. */

static bool
is_cmp (rtx insn)
34482 enum attr_type type;
34484 type = get_attr_type (insn);
34485 return (type == TYPE_TEST
34486 || type == TYPE_ICMP
34487 || type == TYPE_FCMP
34488 || GET_CODE (PATTERN (insn)) == COMPARE);
34491 /* Return true if a dispatch violation was encountered.  */
34494 dispatch_violation (void)
34496 if (dispatch_window_list->next)
34497 return dispatch_window_list->next->violation;
34498 return dispatch_window_list->violation;
34501 /* Return true if insn is a branch instruction. */
34503 static bool
34504 is_branch (rtx insn)
34505 {
34506 return (CALL_P (insn) || JUMP_P (insn));
34509 /* Return true if insn is a prefetch instruction. */
34511 static bool
34512 is_prefetch (rtx insn)
34513 {
34514 return NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == PREFETCH;
34517 /* This function initializes a dispatch window and the list container holding a
34518 pointer to the window. */
34520 static void
34521 init_window (int window_num)
34522 {
34523 int i;
34524 dispatch_windows *new_list;
34526 if (window_num == 0)
34527 new_list = dispatch_window_list;
34528 else
34529 new_list = dispatch_window_list1;
34531 new_list->num_insn = 0;
34532 new_list->num_uops = 0;
34533 new_list->window_size = 0;
34534 new_list->next = NULL;
34535 new_list->prev = NULL;
34536 new_list->window_num = window_num;
34537 new_list->num_imm = 0;
34538 new_list->num_imm_32 = 0;
34539 new_list->num_imm_64 = 0;
34540 new_list->imm_size = 0;
34541 new_list->num_loads = 0;
34542 new_list->num_stores = 0;
34543 new_list->violation = false;
34545 for (i = 0; i < MAX_INSN; i++)
34547 new_list->window[i].insn = NULL;
34548 new_list->window[i].group = disp_no_group;
34549 new_list->window[i].path = no_path;
34550 new_list->window[i].byte_len = 0;
34551 new_list->window[i].imm_bytes = 0;
34556 /* This function allocates and initializes a dispatch window and the
34557 list container holding a pointer to the window. */
34559 static dispatch_windows *
34560 allocate_window (void)
34562 dispatch_windows *new_list = XNEW (struct dispatch_windows_s);
34563 new_list->window = XNEWVEC (struct sched_insn_info_s, MAX_INSN + 1);
34565 return new_list;
34568 /* This routine initializes the dispatch scheduling information. It
34569 initiates building dispatch scheduler tables and constructs the
34570 first dispatch window. */
34572 static void
34573 init_dispatch_sched (void)
34574 {
34575 /* Allocate a dispatch list and a window. */
34576 dispatch_window_list = allocate_window ();
34577 dispatch_window_list1 = allocate_window ();
34578 init_window (0);
34579 init_window (1);
34582 /* This function returns true if a branch is detected. End of a basic block
34583 does not have to be a branch, but here we assume only branches end a
34584 basic block.  */
34586 static bool
34587 is_end_basic_block (enum dispatch_group group)
34589 return group == disp_branch;
34592 /* This function is called when the end of a window processing is reached. */
34594 static void
34595 process_end_window (void)
34596 {
34597 gcc_assert (dispatch_window_list->num_insn <= MAX_INSN);
34598 if (dispatch_window_list->next)
34599 {
34600 gcc_assert (dispatch_window_list1->num_insn <= MAX_INSN);
34601 gcc_assert (dispatch_window_list->window_size
34602 + dispatch_window_list1->window_size <= 48);
34603 init_window (1);
34604 }
34605 init_window (0);
34608 /* Allocates a new dispatch window and adds it to WINDOW_LIST.
34609 WINDOW_NUM is either 0 or 1. A maximum of two windows are generated
34610 for 48 bytes of instructions. Note that these windows are not dispatch
34611 windows whose size is DISPATCH_WINDOW_SIZE.  */
34613 static dispatch_windows *
34614 allocate_next_window (int window_num)
34616 if (window_num == 0)
34617 {
34618 if (dispatch_window_list->next)
34619 init_window (1);
34620 init_window (0);
34621 return dispatch_window_list;
34622 }
34624 dispatch_window_list->next = dispatch_window_list1;
34625 dispatch_window_list1->prev = dispatch_window_list;
34627 return dispatch_window_list1;
34628 }
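/* Editorial note (usage sketch, not part of GCC): the caller in
   add_to_dispatch_window flips between the two windows with
   "window_num = ~window_num & 1" before calling this function, so
   WINDOW_NUM alternates 0 -> 1 -> 0 and never takes another value.  */
#if 0
static int
next_window_num (int window_num)
{
  return ~window_num & 1;	/* 0 becomes 1, 1 becomes 0.  */
}
#endif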
34630 /* Increment the number of immediate operands of an instruction. */
34632 static int
34633 find_constant_1 (rtx *in_rtx, imm_info *imm_values)
34634 {
34635 if (!in_rtx)
34636 return 0;
34638 switch (GET_CODE (*in_rtx))
34639 {
34640 case CONST:
34641 case SYMBOL_REF:
34642 case CONST_INT:
34643 (imm_values->imm)++;
34644 if (x86_64_immediate_operand (*in_rtx, SImode))
34645 (imm_values->imm32)++;
34646 else
34647 (imm_values->imm64)++;
34648 break;
34650 case CONST_DOUBLE:
34651 (imm_values->imm)++;
34652 (imm_values->imm64)++;
34653 break;
34655 case CODE_LABEL:
34656 if (LABEL_KIND (*in_rtx) == LABEL_NORMAL)
34657 {
34658 (imm_values->imm)++;
34659 (imm_values->imm32)++;
34660 }
34661 break;
34663 default:
34664 break;
34665 }
34667 return 0;
34670 /* Compute number of immediate operands of an instruction. */
34672 static void
34673 find_constant (rtx in_rtx, imm_info *imm_values)
34674 {
34675 for_each_rtx (INSN_P (in_rtx) ? &PATTERN (in_rtx) : &in_rtx,
34676 (rtx_function) find_constant_1, (void *) imm_values);
34679 /* Return total size of immediate operands of an instruction along with number
34680 of corresponding immediate-operands. It initializes its parameters to zero
34681 before calling FIND_CONSTANT.
34682 INSN is the input instruction.  IMM is the total of immediates.
34683 IMM32 is the number of 32 bit immediates.  IMM64 is the number of 64
34684 bit immediates.  */
34686 static int
34687 get_num_immediates (rtx insn, int *imm, int *imm32, int *imm64)
34688 {
34689 imm_info imm_values = {0, 0, 0};
34691 find_constant (insn, &imm_values);
34692 *imm = imm_values.imm;
34693 *imm32 = imm_values.imm32;
34694 *imm64 = imm_values.imm64;
34695 return imm_values.imm32 * 4 + imm_values.imm64 * 8;
34698 /* This function indicates if an operand of an instruction is an
34699 immediate.  */
34701 static int
34702 has_immediate (rtx insn)
34703 {
34704 int num_imm_operand;
34705 int num_imm32_operand;
34706 int num_imm64_operand;
34709 return get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34710 &num_imm64_operand);
34711 }
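/* Editorial worked example (hypothetical caller, not part of GCC):
   unpacking the counts returned by get_num_immediates.  An insn with
   one 32-bit and one 64-bit immediate yields imm == 2, imm32 == 1,
   imm64 == 1 and a size of 1 * 4 + 1 * 8 == 12 bytes.  */
#if 0
static void
show_imm_breakdown (FILE *file, rtx insn)
{
  int imm, imm32, imm64;
  int size = get_num_immediates (insn, &imm, &imm32, &imm64);
  fprintf (file, "imm = %d, imm32 = %d, imm64 = %d, size = %d\n",
	   imm, imm32, imm64, size);
}
#endif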
34714 /* Return single or double path for instructions. */
34716 static enum insn_path
34717 get_insn_path (rtx insn)
34719 enum attr_amdfam10_decode path = get_attr_amdfam10_decode (insn);
34721 if ((int) path == 0)
34722 return path_single;
34724 if ((int) path == 1)
34725 return path_double;
34727 return path_multi;
34730 /* Return insn dispatch group. */
34732 static enum dispatch_group
34733 get_insn_group (rtx insn)
34735 enum dispatch_group group = get_mem_group (insn);
34736 if (group)
34737 return group;
34739 if (is_branch (insn))
34740 return disp_branch;
34742 if (is_cmp (insn))
34743 return disp_cmp;
34745 if (has_immediate (insn))
34746 return disp_imm;
34748 if (is_prefetch (insn))
34749 return disp_prefetch;
34751 return disp_no_group;
34754 /* Count number of GROUP restricted instructions in a dispatch
34755 window WINDOW_LIST. */
34757 static int
34758 count_num_restricted (rtx insn, dispatch_windows *window_list)
34759 {
34760 enum dispatch_group group = get_insn_group (insn);
34761 int imm_size;
34762 int num_imm_operand;
34763 int num_imm32_operand;
34764 int num_imm64_operand;
34766 if (group == disp_no_group)
34767 return 0;
34769 if (group == disp_imm)
34770 {
34771 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34772 &num_imm64_operand);
34773 if (window_list->imm_size + imm_size > MAX_IMM_SIZE
34774 || num_imm_operand + window_list->num_imm > MAX_IMM
34775 || (num_imm32_operand > 0
34776 && (window_list->num_imm_32 + num_imm32_operand > MAX_IMM_32
34777 || window_list->num_imm_64 * 2 + num_imm32_operand > MAX_IMM_32))
34778 || (num_imm64_operand > 0
34779 && (window_list->num_imm_64 + num_imm64_operand > MAX_IMM_64
34780 || window_list->num_imm_32 + num_imm64_operand * 2 > MAX_IMM_32))
34781 || (window_list->imm_size + imm_size == MAX_IMM_SIZE
34782 && num_imm64_operand > 0
34783 && ((window_list->num_imm_64 > 0
34784 && window_list->num_insn >= 2)
34785 || window_list->num_insn >= 3)))
34786 return BIG;
34788 return 1;
34789 }
34791 if ((group == disp_load_store
34792 && (window_list->num_loads >= MAX_LOAD
34793 || window_list->num_stores >= MAX_STORE))
34794 || ((group == disp_load
34795 || group == disp_prefetch)
34796 && window_list->num_loads >= MAX_LOAD)
34797 || (group == disp_store
34798 && window_list->num_stores >= MAX_STORE))
34799 return BIG;
34801 return 1;
34802 }
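/* Editorial note (worked example of the checks above): a 64-bit
   immediate consumes two 32-bit slots, which is why the conditions
   test "num_imm_64 * 2 + num_imm32_operand" against MAX_IMM_32 and,
   symmetrically, "num_imm_32 + num_imm64_operand * 2".  So a window
   already holding one 64-bit immediate has two fewer 32-bit slots
   available than an empty window.  */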
34804 /* This function returns true if insn satisfies dispatch rules on the
34805 last window scheduled. */
34807 static bool
34808 fits_dispatch_window (rtx insn)
34809 {
34810 dispatch_windows *window_list = dispatch_window_list;
34811 dispatch_windows *window_list_next = dispatch_window_list->next;
34812 unsigned int num_restrict;
34813 enum dispatch_group group = get_insn_group (insn);
34814 enum insn_path path = get_insn_path (insn);
34815 int sum;
34817 /* Make disp_cmp and disp_jcc get scheduled at the latest. These
34818 instructions should be given the lowest priority in the
34819 scheduling process in the Haifa scheduler to make sure they will be
34820 scheduled in the same dispatch window as the reference to them.  */
34821 if (group == disp_jcc || group == disp_cmp)
34822 return false;
34824 /* Check nonrestricted. */
34825 if (group == disp_no_group || group == disp_branch)
34826 return true;
34828 /* Get last dispatch window. */
34829 if (window_list_next)
34830 window_list = window_list_next;
34832 if (window_list->window_num == 1)
34834 sum = window_list->prev->window_size + window_list->window_size;
34836 if (sum == 32
34837 || (min_insn_size (insn) + sum) >= 48)
34838 /* Window 1 is full.  Go for next window.  */
34839 return true;
34842 num_restrict = count_num_restricted (insn, window_list);
34844 if (num_restrict > num_allowable_groups[group])
34845 return false;
34847 /* See if it fits in the first window. */
34848 if (window_list->window_num == 0)
34850 /* The first window should have only single and double path
34851 uops.  */
34852 if (path == path_double
34853 && (window_list->num_uops + 2) > MAX_INSN)
34854 return false;
34855 else if (path != path_single)
34856 return false;
34857 }
34859 return true;
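/* Editorial worked example: in window 0 with num_uops == MAX_INSN - 1,
   a path_double insn would need two uop slots, so the check above
   rejects it; add_insn_window then records a dispatch violation.  */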
34861 /* Add an instruction INSN with NUM_UOPS micro-operations to the
34862 dispatch window WINDOW_LIST. */
34864 static void
34865 add_insn_window (rtx insn, dispatch_windows *window_list, int num_uops)
34866 {
34867 int byte_len = min_insn_size (insn);
34868 int num_insn = window_list->num_insn;
34869 int imm_size;
34870 sched_insn_info *window = window_list->window;
34871 enum dispatch_group group = get_insn_group (insn);
34872 enum insn_path path = get_insn_path (insn);
34873 int num_imm_operand;
34874 int num_imm32_operand;
34875 int num_imm64_operand;
34877 if (!window_list->violation && group != disp_cmp
34878 && !fits_dispatch_window (insn))
34879 window_list->violation = true;
34881 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34882 &num_imm64_operand);
34884 /* Initialize window with new instruction. */
34885 window[num_insn].insn = insn;
34886 window[num_insn].byte_len = byte_len;
34887 window[num_insn].group = group;
34888 window[num_insn].path = path;
34889 window[num_insn].imm_bytes = imm_size;
34891 window_list->window_size += byte_len;
34892 window_list->num_insn = num_insn + 1;
34893 window_list->num_uops = window_list->num_uops + num_uops;
34894 window_list->imm_size += imm_size;
34895 window_list->num_imm += num_imm_operand;
34896 window_list->num_imm_32 += num_imm32_operand;
34897 window_list->num_imm_64 += num_imm64_operand;
34899 if (group == disp_store)
34900 window_list->num_stores += 1;
34901 else if (group == disp_load
34902 || group == disp_prefetch)
34903 window_list->num_loads += 1;
34904 else if (group == disp_load_store)
34905 {
34906 window_list->num_stores += 1;
34907 window_list->num_loads += 1;
34908 }
34909 }
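/* Editorial worked example: a read-modify-write insn classified as
   disp_load_store bumps both counters above, so a single such insn
   counts against MAX_LOAD and MAX_STORE at the same time.  */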
34911 /* Adds a scheduled instruction, INSN, to the current dispatch window.
34912 If the total bytes of instructions or the number of instructions in
34913 the window exceed the allowable limits, it allocates a new window.  */
34915 static void
34916 add_to_dispatch_window (rtx insn)
34917 {
34918 int byte_len;
34919 dispatch_windows *window_list;
34920 dispatch_windows *next_list;
34921 dispatch_windows *window0_list;
34922 enum insn_path path;
34923 enum dispatch_group insn_group;
34924 int sum;
34925 int num_insn;
34926 int num_uops;
34927 int window_num;
34928 int insn_num_uops;
34929 int insn_fits = true;
34931 if (INSN_CODE (insn) < 0)
34932 return;
34934 byte_len = min_insn_size (insn);
34935 window_list = dispatch_window_list;
34936 next_list = window_list->next;
34937 path = get_insn_path (insn);
34938 insn_group = get_insn_group (insn);
34940 /* Get the last dispatch window. */
34941 if (next_list)
34942 window_list = dispatch_window_list->next;
34944 if (path == path_single)
34945 insn_num_uops = 1;
34946 else if (path == path_double)
34947 insn_num_uops = 2;
34948 else
34949 insn_num_uops = (int) path;
34951 /* If current window is full, get a new window.
34952 Window number zero is full, if MAX_INSN uops are scheduled in it.
34953 Window number one is full, if window zero's bytes plus window
34954 one's bytes total 32, or if the bytes of the new instruction added
34955 to the total make it greater than 48, or if it already has MAX_INSN
34956 instructions in it.  */
34957 num_insn = window_list->num_insn;
34958 num_uops = window_list->num_uops;
34959 window_num = window_list->window_num;
34960 insn_fits = fits_dispatch_window (insn);
34962 if (num_insn >= MAX_INSN
34963 || num_uops + insn_num_uops > MAX_INSN
34964 || !(insn_fits))
34965 {
34966 window_num = ~window_num & 1;
34967 window_list = allocate_next_window (window_num);
34970 if (window_num == 0)
34972 add_insn_window (insn, window_list, insn_num_uops);
34973 if (window_list->num_insn >= MAX_INSN
34974 && insn_group == disp_branch)
34976 process_end_window ();
34980 else if (window_num == 1)
34982 window0_list = window_list->prev;
34983 sum = window0_list->window_size + window_list->window_size;
34984 if (sum == 32
34985 || (byte_len + sum) >= 48)
34986 {
34987 process_end_window ();
34988 window_list = dispatch_window_list;
34989 }
34991 add_insn_window (insn, window_list, insn_num_uops);
34993 else
34994 gcc_unreachable ();
34996 if (is_end_basic_block (insn_group))
34998 /* End of basic block is reached; do end-basic-block processing.  */
34999 process_end_window ();
35000 }
35001 }
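/* Editorial worked example of the fullness rules above: if window 0
   retired with 20 bytes and window 1 has accumulated 12, then
   sum == 32, window 1 is full, and the pair is flushed through
   process_end_window before a fresh window 0 is started.  */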
35004 /* Print the dispatch window, WINDOW_NUM, to FILE. */
35006 DEBUG_FUNCTION static void
35007 debug_dispatch_window_file (FILE *file, int window_num)
35008 {
35009 dispatch_windows *list;
35010 int i;
35012 if (window_num == 0)
35013 list = dispatch_window_list;
35014 else
35015 list = dispatch_window_list1;
35017 fprintf (file, "Window #%d:\n", list->window_num);
35018 fprintf (file, " num_insn = %d, num_uops = %d, window_size = %d\n",
35019 list->num_insn, list->num_uops, list->window_size);
35020 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
35021 list->num_imm, list->num_imm_32, list->num_imm_64, list->imm_size);
35023 fprintf (file, " num_loads = %d, num_stores = %d\n", list->num_loads,
35024 list->num_stores);
35025 fprintf (file, " insn info:\n");
35027 for (i = 0; i < MAX_INSN; i++)
35029 if (!list->window[i].insn)
35030 break;
35031 fprintf (file, " group[%d] = %s, insn[%d] = %p, path[%d] = %d byte_len[%d] = %d, imm_bytes[%d] = %d\n",
35032 i, group_name[list->window[i].group],
35033 i, (void *)list->window[i].insn,
35034 i, list->window[i].path,
35035 i, list->window[i].byte_len,
35036 i, list->window[i].imm_bytes);
35040 /* Print to stdout a dispatch window. */
35042 DEBUG_FUNCTION void
35043 debug_dispatch_window (int window_num)
35045 debug_dispatch_window_file (stdout, window_num);
35046 }
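/* Editorial usage note: these helpers are meant for interactive use;
   e.g. "call debug_dispatch_window (0)" from gdb prints window 0 in
   the format produced by debug_dispatch_window_file above, and
   passing 1 prints the second window.  */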
35048 /* Print INSN dispatch information to FILE. */
35050 DEBUG_FUNCTION static void
35051 debug_insn_dispatch_info_file (FILE *file, rtx insn)
35052 {
35053 int byte_len;
35054 enum insn_path path;
35055 enum dispatch_group group;
35056 int imm_size;
35057 int num_imm_operand;
35058 int num_imm32_operand;
35059 int num_imm64_operand;
35061 if (INSN_CODE (insn) < 0)
35062 return;
35064 byte_len = min_insn_size (insn);
35065 path = get_insn_path (insn);
35066 group = get_insn_group (insn);
35067 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
35068 &num_imm64_operand);
35070 fprintf (file, " insn info:\n");
35071 fprintf (file, " group = %s, path = %d, byte_len = %d\n",
35072 group_name[group], path, byte_len);
35073 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
35074 num_imm_operand, num_imm32_operand, num_imm64_operand, imm_size);
35077 /* Print to STDERR the status of the ready list with respect to
35078 dispatch windows. */
35080 DEBUG_FUNCTION void
35081 debug_ready_dispatch (void)
35082 {
35083 int i;
35084 int no_ready = number_in_ready ();
35086 fprintf (stdout, "Number of ready: %d\n", no_ready);
35088 for (i = 0; i < no_ready; i++)
35089 debug_insn_dispatch_info_file (stdout, get_ready_element (i));
35092 /* This routine is the driver of the dispatch scheduler. */
35094 static void
35095 do_dispatch (rtx insn, int mode)
35096 {
35097 if (mode == DISPATCH_INIT)
35098 init_dispatch_sched ();
35099 else if (mode == ADD_TO_DISPATCH_WINDOW)
35100 add_to_dispatch_window (insn);
35101 }
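/* Editorial sketch (simplified; assumes the TARGET_SCHED_DISPATCH /
   TARGET_SCHED_DISPATCH_DO hooks that wire has_dispatch and
   do_dispatch into targetm, not shown in this excerpt): the Haifa
   scheduler drives this code roughly as follows.  */
#if 0
static void
example_dispatch_driver (rtx insn)
{
  if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
    {
      targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
      targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
    }
}
#endif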
35103 /* Return TRUE if Dispatch Scheduling is supported. */
35105 static bool
35106 has_dispatch (rtx insn, int action)
35107 {
35108 if (ix86_tune == PROCESSOR_BDVER1 && flag_dispatch_scheduler)
35109 switch (action)
35110 {
35111 default:
35112 return false;
35114 case IS_DISPATCH_ON:
35115 return true;
35118 case IS_CMP:
35119 return is_cmp (insn);
35121 case DISPATCH_VIOLATION:
35122 return dispatch_violation ();
35124 case FITS_DISPATCH_WINDOW:
35125 return fits_dispatch_window (insn);
35126 }
35128 return false;
35129 }
35131 /* ??? No autovectorization into MMX or 3DNOW until we can reliably
35132 place emms and femms instructions. */
35134 static enum machine_mode
35135 ix86_preferred_simd_mode (enum machine_mode mode)
35137 /* Disable double precision vectorizer if needed. */
35138 if (mode == DFmode && !TARGET_VECTORIZE_DOUBLE)
35139 return word_mode;
35141 if (!TARGET_AVX && !TARGET_SSE)
35142 return word_mode;
35144 switch (mode)
35145 {
35146 case SFmode:
35147 return (TARGET_AVX && !flag_prefer_avx128) ? V8SFmode : V4SFmode;
35148 case DFmode:
35149 return (TARGET_AVX && !flag_prefer_avx128) ? V4DFmode : V2DFmode;
35165 /* If AVX is enabled then try vectorizing with both 256bit and 128bit
35166 vectors.  */
35168 static unsigned int
35169 ix86_autovectorize_vector_sizes (void)
35171 return TARGET_AVX ? 32 | 16 : 0;
35172 }
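/* Editorial note (worked example): the return value is a bitmask of
   vector sizes in bytes, so 32 | 16 == 48 encodes the set {32, 16},
   i.e. "try 256-bit vectors, then 128-bit" when AVX is enabled,
   while 0 means "use only the preferred SIMD mode".  */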
35174 /* Initialize the GCC target structure. */
35175 #undef TARGET_RETURN_IN_MEMORY
35176 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
35178 #undef TARGET_LEGITIMIZE_ADDRESS
35179 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
35181 #undef TARGET_ATTRIBUTE_TABLE
35182 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
35183 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
35184 # undef TARGET_MERGE_DECL_ATTRIBUTES
35185 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
35186 #endif
35188 #undef TARGET_COMP_TYPE_ATTRIBUTES
35189 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
35191 #undef TARGET_INIT_BUILTINS
35192 #define TARGET_INIT_BUILTINS ix86_init_builtins
35193 #undef TARGET_BUILTIN_DECL
35194 #define TARGET_BUILTIN_DECL ix86_builtin_decl
35195 #undef TARGET_EXPAND_BUILTIN
35196 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
35198 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
35199 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
35200 ix86_builtin_vectorized_function
35202 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
35203 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
35205 #undef TARGET_BUILTIN_RECIPROCAL
35206 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
35208 #undef TARGET_ASM_FUNCTION_EPILOGUE
35209 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
35211 #undef TARGET_ENCODE_SECTION_INFO
35212 #ifndef SUBTARGET_ENCODE_SECTION_INFO
35213 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
35214 #else
35215 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
35216 #endif
35218 #undef TARGET_ASM_OPEN_PAREN
35219 #define TARGET_ASM_OPEN_PAREN ""
35220 #undef TARGET_ASM_CLOSE_PAREN
35221 #define TARGET_ASM_CLOSE_PAREN ""
35223 #undef TARGET_ASM_BYTE_OP
35224 #define TARGET_ASM_BYTE_OP ASM_BYTE
35226 #undef TARGET_ASM_ALIGNED_HI_OP
35227 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
35228 #undef TARGET_ASM_ALIGNED_SI_OP
35229 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
35230 #ifdef ASM_QUAD
35231 #undef TARGET_ASM_ALIGNED_DI_OP
35232 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
35233 #endif
35235 #undef TARGET_PROFILE_BEFORE_PROLOGUE
35236 #define TARGET_PROFILE_BEFORE_PROLOGUE ix86_profile_before_prologue
35238 #undef TARGET_ASM_UNALIGNED_HI_OP
35239 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
35240 #undef TARGET_ASM_UNALIGNED_SI_OP
35241 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
35242 #undef TARGET_ASM_UNALIGNED_DI_OP
35243 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
35245 #undef TARGET_PRINT_OPERAND
35246 #define TARGET_PRINT_OPERAND ix86_print_operand
35247 #undef TARGET_PRINT_OPERAND_ADDRESS
35248 #define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
35249 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
35250 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
35251 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
35252 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA i386_asm_output_addr_const_extra
35254 #undef TARGET_SCHED_INIT_GLOBAL
35255 #define TARGET_SCHED_INIT_GLOBAL ix86_sched_init_global
35256 #undef TARGET_SCHED_ADJUST_COST
35257 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
35258 #undef TARGET_SCHED_ISSUE_RATE
35259 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
35260 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
35261 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
35262 ia32_multipass_dfa_lookahead
35264 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
35265 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
35268 #undef TARGET_HAVE_TLS
35269 #define TARGET_HAVE_TLS true
35271 #undef TARGET_CANNOT_FORCE_CONST_MEM
35272 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
35273 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
35274 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
35276 #undef TARGET_DELEGITIMIZE_ADDRESS
35277 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
35279 #undef TARGET_MS_BITFIELD_LAYOUT_P
35280 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
35282 #if TARGET_MACHO
35283 #undef TARGET_BINDS_LOCAL_P
35284 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
35285 #endif
35286 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
35287 #undef TARGET_BINDS_LOCAL_P
35288 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
35289 #endif
35291 #undef TARGET_ASM_OUTPUT_MI_THUNK
35292 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
35293 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
35294 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
35296 #undef TARGET_ASM_FILE_START
35297 #define TARGET_ASM_FILE_START x86_file_start
35299 #undef TARGET_DEFAULT_TARGET_FLAGS
35300 #define TARGET_DEFAULT_TARGET_FLAGS \
35301 (TARGET_DEFAULT \
35302 | TARGET_SUBTARGET_DEFAULT \
35303 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
35305 #undef TARGET_HANDLE_OPTION
35306 #define TARGET_HANDLE_OPTION ix86_handle_option
35308 #undef TARGET_OPTION_OVERRIDE
35309 #define TARGET_OPTION_OVERRIDE ix86_option_override
35310 #undef TARGET_OPTION_OPTIMIZATION_TABLE
35311 #define TARGET_OPTION_OPTIMIZATION_TABLE ix86_option_optimization_table
35312 #undef TARGET_OPTION_INIT_STRUCT
35313 #define TARGET_OPTION_INIT_STRUCT ix86_option_init_struct
35315 #undef TARGET_REGISTER_MOVE_COST
35316 #define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
35317 #undef TARGET_MEMORY_MOVE_COST
35318 #define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
35319 #undef TARGET_RTX_COSTS
35320 #define TARGET_RTX_COSTS ix86_rtx_costs
35321 #undef TARGET_ADDRESS_COST
35322 #define TARGET_ADDRESS_COST ix86_address_cost
35324 #undef TARGET_FIXED_CONDITION_CODE_REGS
35325 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
35326 #undef TARGET_CC_MODES_COMPATIBLE
35327 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
35329 #undef TARGET_MACHINE_DEPENDENT_REORG
35330 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
35332 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
35333 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
35335 #undef TARGET_BUILD_BUILTIN_VA_LIST
35336 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
35338 #undef TARGET_ENUM_VA_LIST_P
35339 #define TARGET_ENUM_VA_LIST_P ix86_enum_va_list
35341 #undef TARGET_FN_ABI_VA_LIST
35342 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
35344 #undef TARGET_CANONICAL_VA_LIST_TYPE
35345 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
35347 #undef TARGET_EXPAND_BUILTIN_VA_START
35348 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
35350 #undef TARGET_MD_ASM_CLOBBERS
35351 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
35353 #undef TARGET_PROMOTE_PROTOTYPES
35354 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
35355 #undef TARGET_STRUCT_VALUE_RTX
35356 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
35357 #undef TARGET_SETUP_INCOMING_VARARGS
35358 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
35359 #undef TARGET_MUST_PASS_IN_STACK
35360 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
35361 #undef TARGET_FUNCTION_ARG_ADVANCE
35362 #define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
35363 #undef TARGET_FUNCTION_ARG
35364 #define TARGET_FUNCTION_ARG ix86_function_arg
35365 #undef TARGET_FUNCTION_ARG_BOUNDARY
35366 #define TARGET_FUNCTION_ARG_BOUNDARY ix86_function_arg_boundary
35367 #undef TARGET_PASS_BY_REFERENCE
35368 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
35369 #undef TARGET_INTERNAL_ARG_POINTER
35370 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
35371 #undef TARGET_UPDATE_STACK_BOUNDARY
35372 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
35373 #undef TARGET_GET_DRAP_RTX
35374 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
35375 #undef TARGET_STRICT_ARGUMENT_NAMING
35376 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
35377 #undef TARGET_STATIC_CHAIN
35378 #define TARGET_STATIC_CHAIN ix86_static_chain
35379 #undef TARGET_TRAMPOLINE_INIT
35380 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
35381 #undef TARGET_RETURN_POPS_ARGS
35382 #define TARGET_RETURN_POPS_ARGS ix86_return_pops_args
35384 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
35385 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
35387 #undef TARGET_SCALAR_MODE_SUPPORTED_P
35388 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
35390 #undef TARGET_VECTOR_MODE_SUPPORTED_P
35391 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
35393 #undef TARGET_C_MODE_FOR_SUFFIX
35394 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
35396 #ifdef HAVE_AS_TLS
35397 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
35398 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
35399 #endif
35401 #ifdef SUBTARGET_INSERT_ATTRIBUTES
35402 #undef TARGET_INSERT_ATTRIBUTES
35403 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
35404 #endif
35406 #undef TARGET_MANGLE_TYPE
35407 #define TARGET_MANGLE_TYPE ix86_mangle_type
35409 #undef TARGET_STACK_PROTECT_FAIL
35410 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
35412 #undef TARGET_SUPPORTS_SPLIT_STACK
35413 #define TARGET_SUPPORTS_SPLIT_STACK ix86_supports_split_stack
35415 #undef TARGET_FUNCTION_VALUE
35416 #define TARGET_FUNCTION_VALUE ix86_function_value
35418 #undef TARGET_FUNCTION_VALUE_REGNO_P
35419 #define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p
35421 #undef TARGET_SECONDARY_RELOAD
35422 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
35424 #undef TARGET_PREFERRED_RELOAD_CLASS
35425 #define TARGET_PREFERRED_RELOAD_CLASS ix86_preferred_reload_class
35426 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
35427 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS ix86_preferred_output_reload_class
35428 #undef TARGET_CLASS_LIKELY_SPILLED_P
35429 #define TARGET_CLASS_LIKELY_SPILLED_P ix86_class_likely_spilled_p
35431 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
35432 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
35433 ix86_builtin_vectorization_cost
35434 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
35435 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
35436 ix86_vectorize_builtin_vec_perm
35437 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
35438 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
35439 ix86_vectorize_builtin_vec_perm_ok
35440 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
35441 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
35442 ix86_preferred_simd_mode
35443 #undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
35444 #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
35445 ix86_autovectorize_vector_sizes
35447 #undef TARGET_SET_CURRENT_FUNCTION
35448 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
35450 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
35451 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
35453 #undef TARGET_OPTION_SAVE
35454 #define TARGET_OPTION_SAVE ix86_function_specific_save
35456 #undef TARGET_OPTION_RESTORE
35457 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
35459 #undef TARGET_OPTION_PRINT
35460 #define TARGET_OPTION_PRINT ix86_function_specific_print
35462 #undef TARGET_CAN_INLINE_P
35463 #define TARGET_CAN_INLINE_P ix86_can_inline_p
35465 #undef TARGET_EXPAND_TO_RTL_HOOK
35466 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
35468 #undef TARGET_LEGITIMATE_ADDRESS_P
35469 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
35471 #undef TARGET_LEGITIMATE_CONSTANT_P
35472 #define TARGET_LEGITIMATE_CONSTANT_P ix86_legitimate_constant_p
35474 #undef TARGET_FRAME_POINTER_REQUIRED
35475 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
35477 #undef TARGET_CAN_ELIMINATE
35478 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
35480 #undef TARGET_EXTRA_LIVE_ON_ENTRY
35481 #define TARGET_EXTRA_LIVE_ON_ENTRY ix86_live_on_entry
35483 #undef TARGET_ASM_CODE_END
35484 #define TARGET_ASM_CODE_END ix86_code_end
35486 #undef TARGET_CONDITIONAL_REGISTER_USAGE
35487 #define TARGET_CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage
35489 #if TARGET_MACHO
35490 #undef TARGET_INIT_LIBFUNCS
35491 #define TARGET_INIT_LIBFUNCS darwin_rename_builtins
35492 #endif
35494 struct gcc_target targetm = TARGET_INITIALIZER;
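/* Editorial sketch (hypothetical, simplified): the #undef/#define
   pairs above populate the targetm hook vector, which is how
   target-independent code reaches the ix86_* implementations.  */
#if 0
static unsigned int
example_query_vector_sizes (void)
{
  /* Reaches ix86_autovectorize_vector_sizes through the hook.  */
  return targetm.vectorize.autovectorize_vector_sizes ();
}
#endif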
35496 #include "gt-i386.h"