/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "cgraph.h"
#include "gimple.h"
#include "dwarf2.h"
#include "df.h"
#include "tm-constrs.h"
#include "params.h"
#include "cselib.h"
#include "debug.h"
#include "sched-int.h"
#include "sbitmap.h"
#include "fibheap.h"
#include "opts.h"
#include "diagnostic.h"
enum upper_128bits_state
{
  unknown = 0,
  unused,
  used
};

typedef struct block_info_def
{
  /* State of the upper 128bits of AVX registers at exit.  */
  enum upper_128bits_state state;
  /* TRUE if state of the upper 128bits of AVX registers is unchanged
     in this block.  */
  bool unchanged;
  /* TRUE if block has been processed.  */
  bool processed;
  /* TRUE if block has been scanned.  */
  bool scanned;
  /* Previous state of the upper 128bits of AVX registers at entry.  */
  enum upper_128bits_state prev;
} *block_info;

#define BLOCK_INFO(B) ((block_info) (B)->aux)
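
/* Illustrative note (added, not from the original source): the pass below
   hangs a block_info_def off each basic block's AUX field, and
   alloc_aux_for_blocks zero-initializes that memory, which is why UNKNOWN
   must be 0.  A minimal usage sketch:

     alloc_aux_for_blocks (sizeof (struct block_info_def));
     FOR_EACH_BB (bb)
       gcc_assert (BLOCK_INFO (bb)->state == unknown);
     ...
     free_aux_for_blocks ();
*/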
enum call_avx256_state
{
  /* Callee returns 256bit AVX register.  */
  callee_return_avx256 = -1,
  /* Callee returns and passes 256bit AVX register.  */
  callee_return_pass_avx256,
  /* Callee passes 256bit AVX register.  */
  callee_pass_avx256,
  /* Callee doesn't return nor pass 256bit AVX register, or no
     256bit AVX register in function return.  */
  call_no_avx256,
  /* vzeroupper intrinsic.  */
  vzeroupper_intrinsic
};
/* Check if a 256bit AVX register is referenced in stores.  */

static void
check_avx256_stores (rtx dest, const_rtx set, void *data)
{
  if ((REG_P (dest)
       && VALID_AVX256_REG_MODE (GET_MODE (dest)))
      || (GET_CODE (set) == SET
          && REG_P (SET_SRC (set))
          && VALID_AVX256_REG_MODE (GET_MODE (SET_SRC (set)))))
    {
      enum upper_128bits_state *state
        = (enum upper_128bits_state *) data;
      *state = used;
    }
}
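
/* Illustrative sketch (added, not from the original source): note_stores
   walks every SET and CLOBBER inside a pattern and invokes the callback
   above once per store destination, so one insn is scanned like this:

     enum upper_128bits_state state = unused;
     note_stores (PATTERN (insn), check_avx256_stores, &state);
     if (state == used)
       ;  /* the insn writes a 256bit AVX register  */
*/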
/* Helper function for move_or_delete_vzeroupper_1.  Look for vzeroupper
   in basic block BB.  Delete it if upper 128bit AVX registers are
   unused.  If it isn't deleted, move it to just before a jump insn.

   STATE is state of the upper 128bits of AVX registers at entry.  */

static void
move_or_delete_vzeroupper_2 (basic_block bb,
                             enum upper_128bits_state state)
{
  rtx insn, bb_end;
  rtx vzeroupper_insn = NULL_RTX;
  rtx pat;
  int avx256;
  bool unchanged;

  if (BLOCK_INFO (bb)->unchanged)
    {
      if (dump_file)
        fprintf (dump_file, " [bb %i] unchanged: upper 128bits: %d\n",
                 bb->index, state);

      BLOCK_INFO (bb)->state = state;
      return;
    }

  if (BLOCK_INFO (bb)->scanned && BLOCK_INFO (bb)->prev == state)
    {
      if (dump_file)
        fprintf (dump_file, " [bb %i] scanned: upper 128bits: %d\n",
                 bb->index, BLOCK_INFO (bb)->state);
      return;
    }

  BLOCK_INFO (bb)->prev = state;

  if (dump_file)
    fprintf (dump_file, " [bb %i] entry: upper 128bits: %d\n",
             bb->index, state);

  unchanged = true;

  /* BB_END changes when it is deleted.  */
  bb_end = BB_END (bb);
  insn = BB_HEAD (bb);
  while (insn != bb_end)
    {
      insn = NEXT_INSN (insn);

      if (!NONDEBUG_INSN_P (insn))
        continue;

      /* Move vzeroupper before jump/call.  */
      if (JUMP_P (insn) || CALL_P (insn))
        {
          if (!vzeroupper_insn)
            continue;

          if (PREV_INSN (insn) != vzeroupper_insn)
            {
              if (dump_file)
                {
                  fprintf (dump_file, "Move vzeroupper after:\n");
                  print_rtl_single (dump_file, PREV_INSN (insn));
                  fprintf (dump_file, "before:\n");
                  print_rtl_single (dump_file, insn);
                }
              reorder_insns_nobb (vzeroupper_insn, vzeroupper_insn,
                                  PREV_INSN (insn));
            }
          vzeroupper_insn = NULL_RTX;
          continue;
        }

      pat = PATTERN (insn);

      /* Check insn for vzeroupper intrinsic.  */
      if (GET_CODE (pat) == UNSPEC_VOLATILE
          && XINT (pat, 1) == UNSPECV_VZEROUPPER)
        {
          if (dump_file)
            {
              /* Found vzeroupper intrinsic.  */
              fprintf (dump_file, "Found vzeroupper:\n");
              print_rtl_single (dump_file, insn);
            }
        }
      else
        {
          /* Check insn for vzeroall intrinsic.  */
          if (GET_CODE (pat) == PARALLEL
              && GET_CODE (XVECEXP (pat, 0, 0)) == UNSPEC_VOLATILE
              && XINT (XVECEXP (pat, 0, 0), 1) == UNSPECV_VZEROALL)
            {
              state = unused;
              unchanged = false;

              /* Delete pending vzeroupper insertion.  */
              if (vzeroupper_insn)
                {
                  delete_insn (vzeroupper_insn);
                  vzeroupper_insn = NULL_RTX;
                }
            }
          else if (state != used)
            {
              note_stores (pat, check_avx256_stores, &state);
              if (state == used)
                unchanged = false;
            }
          continue;
        }

      /* Process vzeroupper intrinsic.  */
      avx256 = INTVAL (XVECEXP (pat, 0, 0));

      if (state == unused)
        {
          /* Since the upper 128bits are cleared, callee must not pass
             256bit AVX register.  We only need to check if callee
             returns 256bit AVX register.  */
          if (avx256 == callee_return_avx256)
            {
              state = used;
              unchanged = false;
            }

          /* Remove unnecessary vzeroupper since upper 128bits are
             cleared.  */
          if (dump_file)
            {
              fprintf (dump_file, "Delete redundant vzeroupper:\n");
              print_rtl_single (dump_file, insn);
            }
          delete_insn (insn);
        }
      else
        {
          /* Set state to UNUSED if callee doesn't return 256bit AVX
             register.  */
          if (avx256 != callee_return_pass_avx256)
            state = unused;

          if (avx256 == callee_return_pass_avx256
              || avx256 == callee_pass_avx256)
            {
              /* Must remove vzeroupper since callee passes in 256bit
                 AVX register.  */
              if (dump_file)
                {
                  fprintf (dump_file, "Delete callee pass vzeroupper:\n");
                  print_rtl_single (dump_file, insn);
                }
              delete_insn (insn);
            }
          else
            {
              vzeroupper_insn = insn;
              unchanged = false;
            }
        }
    }

  BLOCK_INFO (bb)->state = state;
  BLOCK_INFO (bb)->unchanged = unchanged;
  BLOCK_INFO (bb)->scanned = true;

  if (dump_file)
    fprintf (dump_file, " [bb %i] exit: %s: upper 128bits: %d\n",
             bb->index, unchanged ? "unchanged" : "changed",
             state);
}
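
/* Illustrative example (added, not from the original source) of what
   move_or_delete_vzeroupper_2 does to a block whose upper 128bits may be
   live on entry: a vzeroupper left after the last AVX256 use is sunk so
   that it immediately precedes the jump or call leaving the block,

       vzeroupper                     ...
       ...integer insns...     ==>    ...integer insns...
       call foo                       vzeroupper
                                      call foo

   while a vzeroupper reached with STATE == unused is simply deleted.  */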
/* Helper function for move_or_delete_vzeroupper.  Process vzeroupper
   in BLOCK and check its predecessor blocks.  Treat UNKNOWN state
   as UNUSED if UNKNOWN_IS_UNUSED is true.  Return TRUE if the exit
   state is changed.  */

static bool
move_or_delete_vzeroupper_1 (basic_block block, bool unknown_is_unused)
{
  edge e;
  edge_iterator ei;
  enum upper_128bits_state state, old_state, new_state;
  bool seen_unknown;

  if (dump_file)
    fprintf (dump_file, " Process [bb %i]: status: %d\n",
             block->index, BLOCK_INFO (block)->processed);

  if (BLOCK_INFO (block)->processed)
    return false;

  state = unused;

  /* Check all predecessor edges of this block.  */
  seen_unknown = false;
  FOR_EACH_EDGE (e, ei, block->preds)
    {
      switch (BLOCK_INFO (e->src)->state)
        {
        case unknown:
          if (!unknown_is_unused)
            seen_unknown = true;
          break;
        case unused:
          break;
        case used:
          state = used;
          break;
        }
    }

  if (seen_unknown)
    state = unknown;

  old_state = BLOCK_INFO (block)->state;
  move_or_delete_vzeroupper_2 (block, state);
  new_state = BLOCK_INFO (block)->state;

  if (state != unknown || new_state == used)
    BLOCK_INFO (block)->processed = true;

  /* Need to rescan if the upper 128bits of AVX registers are changed
     to USED at exit.  */
  if (new_state != old_state)
    {
      if (new_state == used)
        cfun->machine->rescan_vzeroupper_p = 1;
      return true;
    }
  else
    return false;
}
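
/* Summary of the predecessor meet rule above (added note): starting from
   UNUSED, any predecessor in USED raises the state to USED, and any
   predecessor still in UNKNOWN then forces UNKNOWN, so the effective
   precedence is unknown > used > unused.  When UNKNOWN_IS_UNUSED is true
   (the final clean-up pass), UNKNOWN predecessors are ignored instead.  */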
/* Go through the instruction stream looking for vzeroupper.  Delete
   it if upper 128bit AVX registers are unused.  If it isn't deleted,
   move it to just before a jump insn.  */

static void
move_or_delete_vzeroupper (void)
{
  edge e;
  edge_iterator ei;
  basic_block bb;
  fibheap_t worklist, pending, fibheap_swap;
  sbitmap visited, in_worklist, in_pending, sbitmap_swap;
  int *bb_order;
  int *rc_order;
  int i;

  /* Set up block info for each basic block.  */
  alloc_aux_for_blocks (sizeof (struct block_info_def));

  /* Process outgoing edges of entry point.  */
  if (dump_file)
    fprintf (dump_file, "Process outgoing edges of entry point\n");

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    {
      move_or_delete_vzeroupper_2 (e->dest,
                                   cfun->machine->caller_pass_avx256_p
                                   ? used : unused);
      BLOCK_INFO (e->dest)->processed = true;
    }

  /* Compute reverse completion order of depth first search of the CFG
     so that the data-flow runs faster.  */
  rc_order = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
  bb_order = XNEWVEC (int, last_basic_block);
  pre_and_rev_post_order_compute (NULL, rc_order, false);
  for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
    bb_order[rc_order[i]] = i;
  free (rc_order);

  worklist = fibheap_new ();
  pending = fibheap_new ();
  visited = sbitmap_alloc (last_basic_block);
  in_worklist = sbitmap_alloc (last_basic_block);
  in_pending = sbitmap_alloc (last_basic_block);
  sbitmap_zero (in_worklist);

  /* Don't check outgoing edges of entry point.  */
  sbitmap_ones (in_pending);
  FOR_EACH_BB (bb)
    if (BLOCK_INFO (bb)->processed)
      RESET_BIT (in_pending, bb->index);
    else
      {
        move_or_delete_vzeroupper_1 (bb, false);
        fibheap_insert (pending, bb_order[bb->index], bb);
      }

  if (dump_file)
    fprintf (dump_file, "Check remaining basic blocks\n");

  while (!fibheap_empty (pending))
    {
      fibheap_swap = pending;
      pending = worklist;
      worklist = fibheap_swap;
      sbitmap_swap = in_pending;
      in_pending = in_worklist;
      in_worklist = sbitmap_swap;

      sbitmap_zero (visited);

      cfun->machine->rescan_vzeroupper_p = 0;

      while (!fibheap_empty (worklist))
        {
          bb = (basic_block) fibheap_extract_min (worklist);
          RESET_BIT (in_worklist, bb->index);
          gcc_assert (!TEST_BIT (visited, bb->index));
          if (!TEST_BIT (visited, bb->index))
            {
              edge_iterator ei;

              SET_BIT (visited, bb->index);

              if (move_or_delete_vzeroupper_1 (bb, false))
                FOR_EACH_EDGE (e, ei, bb->succs)
                  {
                    if (e->dest == EXIT_BLOCK_PTR
                        || BLOCK_INFO (e->dest)->processed)
                      continue;

                    if (TEST_BIT (visited, e->dest->index))
                      {
                        if (!TEST_BIT (in_pending, e->dest->index))
                          {
                            /* Send E->DEST to next round.  */
                            SET_BIT (in_pending, e->dest->index);
                            fibheap_insert (pending,
                                            bb_order[e->dest->index],
                                            e->dest);
                          }
                      }
                    else if (!TEST_BIT (in_worklist, e->dest->index))
                      {
                        /* Add E->DEST to current round.  */
                        SET_BIT (in_worklist, e->dest->index);
                        fibheap_insert (worklist, bb_order[e->dest->index],
                                        e->dest);
                      }
                  }
            }
        }

      if (!cfun->machine->rescan_vzeroupper_p)
        break;
    }

  free (bb_order);
  fibheap_delete (worklist);
  fibheap_delete (pending);
  sbitmap_free (visited);
  sbitmap_free (in_worklist);
  sbitmap_free (in_pending);

  if (dump_file)
    fprintf (dump_file, "Process remaining basic blocks\n");

  FOR_EACH_BB (bb)
    move_or_delete_vzeroupper_1 (bb, true);

  free_aux_for_blocks ();
}
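
/* Added note: the PENDING/WORKLIST fibheap pair above is a conventional
   two-phase dataflow worklist (compare df_worklist_dataflow): blocks whose
   exit state changed are queued for the next sweep, the heaps are swapped
   between sweeps, and the reverse completion order keys make predecessors
   tend to be visited before their successors.  */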
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)			\
  ((mode) == QImode ? 0				\
   : (mode) == HImode ? 1			\
   : (mode) == SImode ? 2			\
   : (mode) == DImode ? 3			\
   : 4)
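
/* Usage sketch (added, illustrative): the five-entry multiply and divide
   arrays in the cost tables below are indexed with MODE_INDEX, e.g.

     ix86_cost->mult_init[MODE_INDEX (SImode)]

   selects the SImode multiply startup cost, with index 4 ("other")
   covering any remaining mode.  */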
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
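
/* Worked check (added): with COSTS_N_INSNS (N) defined as (N) * 4, a
   2-byte addition costs COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1), so
   byte counts and instruction counts land on the same cost scale.  */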
#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
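
/* How to read the stringop_algs initializers below (added note): each is
   {unknown_size_alg, {{max0, alg0}, {max1, alg1}, ...}} -- the leading
   algorithm is used when the byte count is unknown at compile time, and
   each {max, alg} pair selects ALG for known sizes up to MAX, with -1
   closing the list.  For example,

     {libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}

   means: unknown size -> libcall, size <= 256 -> rep movsl, anything
   larger -> libcall.  The memcpy and memset tables come in pairs, one for
   32-bit and one for 64-bit code; DUMMY_STRINGOP_ALGS fills the 64-bit
   slot for processors that cannot execute 64-bit code.  */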
const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),			/* cost of an add instruction */
  COSTS_N_BYTES (3),			/* cost of a lea instruction */
  COSTS_N_BYTES (2),			/* variable shift costs */
  COSTS_N_BYTES (3),			/* constant shift costs */
  {COSTS_N_BYTES (3),			/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),			/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  COSTS_N_BYTES (3),			/* cost of movsx */
  COSTS_N_BYTES (3),			/* cost of movzx */
  0,					/* "large" insn */
  2,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {2, 2, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 2},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {2, 2, 2},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  3,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {3, 3},				/* cost of storing MMX registers
					   in SImode and DImode */
  3,					/* cost of moving SSE register */
  {3, 3, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {3, 3, 3},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache  */
  0,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_BYTES (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),			/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),			/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),			/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  1,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  1,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (6),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),			/* HI */
   COSTS_N_INSNS (6),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  COSTS_N_INSNS (1),			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (23),			/* SI */
   COSTS_N_INSNS (23),			/* DI */
   COSTS_N_INSNS (23)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  3,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache  */
  0,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (23),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (12),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),			/* HI */
   COSTS_N_INSNS (12),			/* SI */
   COSTS_N_INSNS (12),			/* DI */
   COSTS_N_INSNS (12)},			/* other */
  1,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),			/* HI */
   COSTS_N_INSNS (40),			/* SI */
   COSTS_N_INSNS (40),			/* DI */
   COSTS_N_INSNS (40)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  3,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  4,					/* size of l1 cache.  486 has 8kB cache
					   shared for code and data, so 4kB is
					   not really precise.  */
  4,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),			/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (11),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),			/* HI */
   COSTS_N_INSNS (11),			/* SI */
   COSTS_N_INSNS (11),			/* DI */
   COSTS_N_INSNS (11)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),			/* HI */
   COSTS_N_INSNS (25),			/* SI */
   COSTS_N_INSNS (25),			/* DI */
   COSTS_N_INSNS (25)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* MOVE_RATIO */
  6,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  8,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  8,					/* size of l2 cache  */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (4)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),			/* HI */
   COSTS_N_INSNS (17),			/* SI */
   COSTS_N_INSNS (17),			/* DI */
   COSTS_N_INSNS (17)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache  */
  32,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks an inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb has apparently more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (2),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (7),			/* SI */
   COSTS_N_INSNS (7),			/* DI */
   COSTS_N_INSNS (7)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (39),			/* SI */
   COSTS_N_INSNS (39),			/* DI */
   COSTS_N_INSNS (39)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* MOVE_RATIO */
  1,					/* cost for loading QImode using movzbl */
  {1, 1, 1},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {1, 1, 1},				/* cost of storing integer registers */
  1,					/* cost of reg,reg fld/fst */
  {1, 1, 1},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 6, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */

  1,					/* cost of moving MMX register */
  {1, 1},				/* cost of loading MMX registers
					   in SImode and DImode */
  {1, 1},				/* cost of storing MMX registers
					   in SImode and DImode */
  1,					/* cost of moving SSE register */
  {1, 1, 1},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {1, 1, 1},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  1,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  128,					/* size of l2 cache.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (3),			/* DI */
   COSTS_N_INSNS (3)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),			/* HI */
   COSTS_N_INSNS (18),			/* SI */
   COSTS_N_INSNS (18),			/* DI */
   COSTS_N_INSNS (18)},			/* other */
  COSTS_N_INSNS (2),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* MOVE_RATIO */
  3,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  6,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  32,					/* size of l2 cache.  Some models
					   have integrated l2 cache, but
					   optimizing for k6 is not important
					   enough to worry about that.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (5),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),			/* HI */
   COSTS_N_INSNS (5),			/* SI */
   COSTS_N_INSNS (5),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  5,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with the REP prefix (relative to
     loops) than K8 does.  Alignment becomes important after 8 bytes for
     memcpy and 128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 3, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  5,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  3,					/* vec_unalign_load_cost.  */
  3,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  2,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs bdver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {5, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {5, 5, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 4},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
  					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  16,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),			/* cost of FSQRT instruction.  */

  /* BDVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  6,					/* scalar_stmt_cost.  */
  4,					/* scalar load_cost.  */
  4,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  4,					/* vec_align_load_cost.  */
  4,					/* vec_unalign_load_cost.  */
  4,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs bdver2_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {5, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {5, 5, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 4},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
  					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  16,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),			/* cost of FSQRT instruction.  */

  /* BDVER2 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  6,					/* scalar_stmt_cost.  */
  4,					/* scalar load_cost.  */
  4,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  4,					/* vec_align_load_cost.  */
  4,					/* vec_unalign_load_cost.  */
  4,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs btver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  32,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* BTVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (3),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (4),			/* constant shift costs */
  {COSTS_N_INSNS (15),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),			/* HI */
   COSTS_N_INSNS (15),			/* SI */
   COSTS_N_INSNS (15),			/* DI */
   COSTS_N_INSNS (15)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),			/* HI */
   COSTS_N_INSNS (56),			/* SI */
   COSTS_N_INSNS (56),			/* DI */
   COSTS_N_INSNS (56)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  6,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  12,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  10,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (5),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (10),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),			/* HI */
   COSTS_N_INSNS (10),			/* SI */
   COSTS_N_INSNS (10),			/* DI */
   COSTS_N_INSNS (10)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),			/* HI */
   COSTS_N_INSNS (66),			/* SI */
   COSTS_N_INSNS (66),			/* DI */
   COSTS_N_INSNS (66)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  3,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  6,					/* cost of moving MMX register */
  {12, 12},				/* cost of loading MMX registers
					   in SImode and DImode */
  {12, 12},				/* cost of storing MMX registers
					   in SImode and DImode */
  6,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {12, 12, 12},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  8,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  1024,					/* size of l2 cache.  */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (2)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
1725 };
1727 /* Generic64 should produce code tuned for Nocona and K8.  */
1728 static const
1729 struct processor_costs generic64_cost = {
1730 COSTS_N_INSNS (1), /* cost of an add instruction */
1731 /* On all chips taken into consideration lea is 2 cycles or more. With
1732 this cost, however, our current implementation of synth_mult results in
1733 the use of unnecessary temporary registers, causing regressions on several
1734 SPECfp benchmarks. */
1735 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1736 COSTS_N_INSNS (1), /* variable shift costs */
1737 COSTS_N_INSNS (1), /* constant shift costs */
1738 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1739 COSTS_N_INSNS (4), /* HI */
1740 COSTS_N_INSNS (3), /* SI */
1741 COSTS_N_INSNS (4), /* DI */
1742 COSTS_N_INSNS (2)}, /* other */
1743 0, /* cost of multiply per each bit set */
1744 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1745 COSTS_N_INSNS (26), /* HI */
1746 COSTS_N_INSNS (42), /* SI */
1747 COSTS_N_INSNS (74), /* DI */
1748 COSTS_N_INSNS (74)}, /* other */
1749 COSTS_N_INSNS (1), /* cost of movsx */
1750 COSTS_N_INSNS (1), /* cost of movzx */
1751 8, /* "large" insn */
1752 17, /* MOVE_RATIO */
1753 4, /* cost for loading QImode using movzbl */
1754 {4, 4, 4}, /* cost of loading integer registers
1755 in QImode, HImode and SImode.
1756 Relative to reg-reg move (2). */
1757 {4, 4, 4}, /* cost of storing integer registers */
1758 4, /* cost of reg,reg fld/fst */
1759 {12, 12, 12}, /* cost of loading fp registers
1760 in SFmode, DFmode and XFmode */
1761 {6, 6, 8}, /* cost of storing fp registers
1762 in SFmode, DFmode and XFmode */
1763 2, /* cost of moving MMX register */
1764 {8, 8}, /* cost of loading MMX registers
1765 in SImode and DImode */
1766 {8, 8}, /* cost of storing MMX registers
1767 in SImode and DImode */
1768 2, /* cost of moving SSE register */
1769 {8, 8, 8}, /* cost of loading SSE registers
1770 in SImode, DImode and TImode */
1771 {8, 8, 8}, /* cost of storing SSE registers
1772 in SImode, DImode and TImode */
1773 5, /* MMX or SSE register to integer */
1774 32, /* size of l1 cache. */
1775 512, /* size of l2 cache. */
1776 64, /* size of prefetch block */
1777 6, /* number of parallel prefetches */
1778 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
1779 value is increased to the perhaps more appropriate value of 5. */
1780 3, /* Branch cost */
1781 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1782 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1783 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1784 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1785 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1786 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1787 {DUMMY_STRINGOP_ALGS,
1788 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1789 {DUMMY_STRINGOP_ALGS,
1790 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1791 1, /* scalar_stmt_cost. */
1792 1, /* scalar load_cost. */
1793 1, /* scalar_store_cost. */
1794 1, /* vec_stmt_cost. */
1795 1, /* vec_to_scalar_cost. */
1796 1, /* scalar_to_vec_cost. */
1797 1, /* vec_align_load_cost. */
1798 2, /* vec_unalign_load_cost. */
1799 1, /* vec_store_cost. */
1800 3, /* cond_taken_branch_cost. */
1801 1, /* cond_not_taken_branch_cost. */
1802 };
1804 /* Generic32 should produce code tuned for PPro, Pentium4, Nocona,
1805 Core 2 and K8.  */
1806 static const
1807 struct processor_costs generic32_cost = {
1808 COSTS_N_INSNS (1), /* cost of an add instruction */
1809 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1810 COSTS_N_INSNS (1), /* variable shift costs */
1811 COSTS_N_INSNS (1), /* constant shift costs */
1812 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1813 COSTS_N_INSNS (4), /* HI */
1814 COSTS_N_INSNS (3), /* SI */
1815 COSTS_N_INSNS (4), /* DI */
1816 COSTS_N_INSNS (2)}, /* other */
1817 0, /* cost of multiply per each bit set */
1818 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1819 COSTS_N_INSNS (26), /* HI */
1820 COSTS_N_INSNS (42), /* SI */
1821 COSTS_N_INSNS (74), /* DI */
1822 COSTS_N_INSNS (74)}, /* other */
1823 COSTS_N_INSNS (1), /* cost of movsx */
1824 COSTS_N_INSNS (1), /* cost of movzx */
1825 8, /* "large" insn */
1826 17, /* MOVE_RATIO */
1827 4, /* cost for loading QImode using movzbl */
1828 {4, 4, 4}, /* cost of loading integer registers
1829 in QImode, HImode and SImode.
1830 Relative to reg-reg move (2). */
1831 {4, 4, 4}, /* cost of storing integer registers */
1832 4, /* cost of reg,reg fld/fst */
1833 {12, 12, 12}, /* cost of loading fp registers
1834 in SFmode, DFmode and XFmode */
1835 {6, 6, 8}, /* cost of storing fp registers
1836 in SFmode, DFmode and XFmode */
1837 2, /* cost of moving MMX register */
1838 {8, 8}, /* cost of loading MMX registers
1839 in SImode and DImode */
1840 {8, 8}, /* cost of storing MMX registers
1841 in SImode and DImode */
1842 2, /* cost of moving SSE register */
1843 {8, 8, 8}, /* cost of loading SSE registers
1844 in SImode, DImode and TImode */
1845 {8, 8, 8}, /* cost of storing SSE registers
1846 in SImode, DImode and TImode */
1847 5, /* MMX or SSE register to integer */
1848 32, /* size of l1 cache. */
1849 256, /* size of l2 cache. */
1850 64, /* size of prefetch block */
1851 6, /* number of parallel prefetches */
1852 3, /* Branch cost */
1853 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1854 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1855 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1856 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1857 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1858 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1859 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1860 DUMMY_STRINGOP_ALGS},
1861 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1862 DUMMY_STRINGOP_ALGS},
1863 1, /* scalar_stmt_cost. */
1864 1, /* scalar load_cost. */
1865 1, /* scalar_store_cost. */
1866 1, /* vec_stmt_cost. */
1867 1, /* vec_to_scalar_cost. */
1868 1, /* scalar_to_vec_cost. */
1869 1, /* vec_align_load_cost. */
1870 2, /* vec_unalign_load_cost. */
1871 1, /* vec_store_cost. */
1872 3, /* cond_taken_branch_cost. */
1873 1, /* cond_not_taken_branch_cost. */
1874 };
1876 const struct processor_costs *ix86_cost = &pentium_cost;
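/* Editorial sketch (not from the original source): the cost tables above
   are consulted through ix86_cost by the rtx-cost hooks.  COSTS_N_INSNS,
   from rtl.h, scales an instruction count into these cost units
   (COSTS_N_INSNS (N) expands to (N) * 4).  The helper below is a
   hypothetical illustration of such a lookup; the mult_init/mult_bit
   field names follow struct processor_costs in i386.h.  */
#if 0
static int
example_mult_cost (int size_index, int bits_set)
{
  /* Cost of starting the multiply for this operand size, plus the
     per-set-bit cost used when a multiply is synthesized.  */
  return ix86_cost->mult_init[size_index]
	 + bits_set * ix86_cost->mult_bit;
}
#endif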
1878 /* Processor feature/optimization bitmasks. */
1879 #define m_386 (1<<PROCESSOR_I386)
1880 #define m_486 (1<<PROCESSOR_I486)
1881 #define m_PENT (1<<PROCESSOR_PENTIUM)
1882 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1883 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1884 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1885 #define m_P4_NOCONA (m_PENT4 | m_NOCONA)
1886 #define m_CORE2_32 (1<<PROCESSOR_CORE2_32)
1887 #define m_CORE2_64 (1<<PROCESSOR_CORE2_64)
1888 #define m_COREI7_32 (1<<PROCESSOR_COREI7_32)
1889 #define m_COREI7_64 (1<<PROCESSOR_COREI7_64)
1890 #define m_COREI7 (m_COREI7_32 | m_COREI7_64)
1891 #define m_CORE2I7_32 (m_CORE2_32 | m_COREI7_32)
1892 #define m_CORE2I7_64 (m_CORE2_64 | m_COREI7_64)
1893 #define m_CORE2I7 (m_CORE2I7_32 | m_CORE2I7_64)
1894 #define m_ATOM (1<<PROCESSOR_ATOM)
1896 #define m_GEODE (1<<PROCESSOR_GEODE)
1897 #define m_K6 (1<<PROCESSOR_K6)
1898 #define m_K6_GEODE (m_K6 | m_GEODE)
1899 #define m_K8 (1<<PROCESSOR_K8)
1900 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1901 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1902 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1903 #define m_BDVER1 (1<<PROCESSOR_BDVER1)
1904 #define m_BDVER2 (1<<PROCESSOR_BDVER2)
1905 #define m_BDVER (m_BDVER1 | m_BDVER2)
1906 #define m_BTVER1 (1<<PROCESSOR_BTVER1)
1907 #define m_AMD_MULTIPLE (m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER1)
1909 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1910 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1912 /* Generic instruction choice should be a common subset of supported CPUs
1913 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1914 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
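/* How these masks are consumed (sketch; the real fill loops appear in
   ix86_option_override_internal below): every PROCESSOR_* enumerator owns
   one bit, each tuning-table entry is the OR of the m_* masks of the
   processors that want the feature, and a feature test is a single AND:

     ix86_tune_mask = 1u << ix86_tune;
     ix86_tune_features[i] = !!(initial_ix86_tune_features[i]
				& ix86_tune_mask);  */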
1916 /* Feature tests against the various tunings. */
1917 unsigned char ix86_tune_features[X86_TUNE_LAST];
1919 /* Feature tests against the various tunings used to create ix86_tune_features
1920 based on the processor mask. */
1921 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1922 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1923 negatively, so enabling it for Generic64 seems like a good code size
1924 tradeoff. We can't enable it for 32bit generic because it does not
1925 work well with PPro based chips. */
1926 m_386 | m_CORE2I7_64 | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC64,
1928 /* X86_TUNE_PUSH_MEMORY */
1929 m_386 | m_P4_NOCONA | m_CORE2I7 | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
1931 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1934 /* X86_TUNE_UNROLL_STRLEN */
1935 m_486 | m_PENT | m_PPRO | m_ATOM | m_CORE2I7 | m_K6 | m_AMD_MULTIPLE | m_GENERIC,
1937 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1938 on simulation result. But after P4 was made, no performance benefit
1939 was observed with branch hints; they also increase the code size.
1940 As a result, icc never generates branch hints. */
1943 /* X86_TUNE_DOUBLE_WITH_ADD */
1946 /* X86_TUNE_USE_SAHF */
1947 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER1 | m_GENERIC,
1949 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1950 partial dependencies. */
1951 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC,
1953 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1954 register stalls on the Generic32 compilation setting as well. However,
1955 in the current implementation the partial register stalls are not eliminated
1956 very well - they can be introduced via subregs synthesized by combine
1957 and can happen in caller/callee saving sequences. Because this option
1958 pays back little on PPro based chips and is in conflict with partial reg
1959 dependencies used by Athlon/P4 based chips, it is better to leave it off
1960 for generic32 for now. */
1963 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1964 m_CORE2I7 | m_GENERIC,
1966 /* X86_TUNE_USE_HIMODE_FIOP */
1967 m_386 | m_486 | m_K6_GEODE,
1969 /* X86_TUNE_USE_SIMODE_FIOP */
1970 ~(m_PENT | m_PPRO | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC),
1972 /* X86_TUNE_USE_MOV0 */
1975 /* X86_TUNE_USE_CLTD */
1976 ~(m_PENT | m_CORE2I7 | m_ATOM | m_K6 | m_GENERIC),
1978 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1981 /* X86_TUNE_SPLIT_LONG_MOVES */
1984 /* X86_TUNE_READ_MODIFY_WRITE */
1987 /* X86_TUNE_READ_MODIFY */
1990 /* X86_TUNE_PROMOTE_QIMODE */
1991 m_386 | m_486 | m_PENT | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
1993 /* X86_TUNE_FAST_PREFIX */
1994 ~(m_386 | m_486 | m_PENT),
1996 /* X86_TUNE_SINGLE_STRINGOP */
1997 m_386 | m_P4_NOCONA,
1999 /* X86_TUNE_QIMODE_MATH */
2002 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
2003 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
2004 might be considered for Generic32 if our scheme for avoiding partial
2005 stalls were more effective. */
2008 /* X86_TUNE_PROMOTE_QI_REGS */
2011 /* X86_TUNE_PROMOTE_HI_REGS */
2014 /* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
2015 over esp addition. */
2016 m_386 | m_486 | m_PENT | m_PPRO,
2018 /* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
2019 over esp addition. */
2022 /* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
2023 over esp subtraction. */
2024 m_386 | m_486 | m_PENT | m_K6_GEODE,
2026 /* X86_TUNE_DOUBLE_PUSH. Enable if double push insn is preferred
2027 over esp subtraction. */
2028 m_PENT | m_K6_GEODE,
2030 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
2031 for DFmode copies */
2032 ~(m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC),
2034 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
2035 m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
2037 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
2038 conflict here between PPro/Pentium4 based chips that treat 128bit
2039 SSE registers as single units versus K8 based chips that divide SSE
2040 registers into two 64bit halves. This knob promotes all store destinations
2041 to be 128bit to allow register renaming on 128bit SSE units, but usually
2042 results in one extra microop on 64bit SSE units. Experimental results
2043 show that disabling this option on P4 brings over a 20% SPECfp regression,
2044 while enabling it on K8 brings roughly 2.4% regression that can be partly
2045 masked by careful scheduling of moves. */
2046 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMDFAM10 | m_BDVER | m_GENERIC,
2048 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
2049 m_COREI7 | m_AMDFAM10 | m_BDVER | m_BTVER1,
2051 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
2054 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
2057 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
2058 are resolved on SSE register parts instead of whole registers, so we may
2059 maintain just the lower part of scalar values in the proper format,
2060 leaving the upper part undefined. */
2063 /* X86_TUNE_SSE_TYPELESS_STORES */
2066 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
2067 m_PPRO | m_P4_NOCONA,
2069 /* X86_TUNE_MEMORY_MISMATCH_STALL */
2070 m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
2072 /* X86_TUNE_PROLOGUE_USING_MOVE */
2073 m_PPRO | m_CORE2I7 | m_ATOM | m_ATHLON_K8 | m_GENERIC,
2075 /* X86_TUNE_EPILOGUE_USING_MOVE */
2076 m_PPRO | m_CORE2I7 | m_ATOM | m_ATHLON_K8 | m_GENERIC,
2078 /* X86_TUNE_SHIFT1 */
2081 /* X86_TUNE_USE_FFREEP */
2084 /* X86_TUNE_INTER_UNIT_MOVES */
2085 ~(m_AMD_MULTIPLE | m_GENERIC),
2087 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
2088 ~(m_AMDFAM10 | m_BDVER ),
2090 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
2091 than 4 branch instructions in the 16 byte window. */
2092 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
2094 /* X86_TUNE_SCHEDULE */
2095 m_PENT | m_PPRO | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
2097 /* X86_TUNE_USE_BT */
2098 m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
2100 /* X86_TUNE_USE_INCDEC */
2101 ~(m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GENERIC),
2103 /* X86_TUNE_PAD_RETURNS */
2104 m_CORE2I7 | m_AMD_MULTIPLE | m_GENERIC,
2106 /* X86_TUNE_PAD_SHORT_FUNCTION: Pad short functions. */
2109 /* X86_TUNE_EXT_80387_CONSTANTS */
2110 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_ATHLON_K8 | m_GENERIC,
2112 /* X86_TUNE_SHORTEN_X87_SSE */
2115 /* X86_TUNE_AVOID_VECTOR_DECODE */
2116 m_CORE2I7_64 | m_K8 | m_GENERIC64,
2118 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
2119 HImode and SImode multiply, but 386 and 486 do HImode multiply faster. */
2122 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
2123 vector path on AMD machines. */
2124 m_CORE2I7_64 | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER1 | m_GENERIC64,
2126 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
2127 machines. */
2128 m_CORE2I7_64 | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER1 | m_GENERIC64,
2130 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
2131 than a mov. */
2134 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
2135 but one byte longer. */
2138 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
2139 operand that cannot be represented using a modRM byte. The XOR
2140 replacement is long decoded, so this split helps here as well. */
2143 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
2144 from FP to FP. */
2145 m_CORE2I7 | m_AMDFAM10 | m_GENERIC,
2147 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
2148 from integer to FP. */
2151 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
2152 with a subsequent conditional jump instruction into a single
2153 compare-and-branch uop. */
2156 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
2157 will impact LEA instruction selection. */
2160 /* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector
2161 operations. */
2164 /* X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL: Enable software prefetching
2165 at -O3. For the moment, the prefetching seems badly tuned for Intel
2166 chips. */
2167 m_K6_GEODE | m_AMD_MULTIPLE,
2169 /* X86_TUNE_AVX128_OPTIMAL: Enable 128-bit AVX instruction generation for
2170 the auto-vectorizer. */
2173 /* X86_TUNE_REASSOC_INT_TO_PARALLEL: Try to produce parallel computations
2174 during reassociation of integer computation. */
2177 /* X86_TUNE_REASSOC_FP_TO_PARALLEL: Try to produce parallel computations
2178 during reassociation of fp computation. */
2182 /* Feature tests against the various architecture variations. */
2183 unsigned char ix86_arch_features[X86_ARCH_LAST];
2185 /* Feature tests against the various architecture variations, used to create
2186 ix86_arch_features based on the processor mask. */
2187 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
2188 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
2189 ~(m_386 | m_486 | m_PENT | m_K6),
2191 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
2194 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
2197 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
2200 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
2204 static const unsigned int x86_accumulate_outgoing_args
2205 = m_PPRO | m_P4_NOCONA | m_ATOM | m_CORE2I7 | m_AMD_MULTIPLE | m_GENERIC;
2207 static const unsigned int x86_arch_always_fancy_math_387
2208 = m_PENT | m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC;
2210 static const unsigned int x86_avx256_split_unaligned_load
2211 = m_COREI7 | m_GENERIC;
2213 static const unsigned int x86_avx256_split_unaligned_store
2214 = m_COREI7 | m_BDVER | m_GENERIC;
2216 /* In case the average insn count for single function invocation is
2217 lower than this constant, emit fast (but longer) prologue and
2218 epilogue code. */
2219 #define FAST_PROLOGUE_INSN_COUNT 20
2221 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
2222 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
2223 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
2224 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
2226 /* Array of the smallest class containing reg number REGNO, indexed by
2227 REGNO. Used by REGNO_REG_CLASS in i386.h. */
2229 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
2230 {
2231 /* ax, dx, cx, bx */
2232 AREG, DREG, CREG, BREG,
2233 /* si, di, bp, sp */
2234 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
2235 /* FP registers */
2236 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
2237 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
2238 /* arg pointer */
2239 NON_Q_REGS,
2240 /* flags, fpsr, fpcr, frame */
2241 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
2243 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2244 SSE_REGS, SSE_REGS,
2245 /* MMX registers */
2246 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
2247 MMX_REGS, MMX_REGS,
2248 /* REX registers */
2249 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2250 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2251 /* SSE REX registers */
2252 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2253 SSE_REGS, SSE_REGS,
2254 };
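/* For reference: REGNO_REG_CLASS in i386.h indexes this table directly,
     #define REGNO_REG_CLASS(REGNO) (regclass_map[REGNO])
   so, e.g., REGNO_REG_CLASS (0) is AREG (%eax) and REGNO_REG_CLASS (7)
   is NON_Q_REGS (%esp).  */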
2256 /* The "default" register map used in 32bit mode. */
2258 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
2259 {
2260 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
2261 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
2262 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2263 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
2264 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
2265 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2266 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2267 };
2269 /* The "default" register map used in 64bit mode. */
2271 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
2272 {
2273 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
2274 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
2275 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2276 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
2277 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
2278 8,9,10,11,12,13,14,15, /* extended integer registers */
2279 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
2280 };
2282 /* Define the register numbers to be used in Dwarf debugging information.
2283 The SVR4 reference port C compiler uses the following register numbers
2284 in its Dwarf output code:
2285 0 for %eax (gcc regno = 0)
2286 1 for %ecx (gcc regno = 2)
2287 2 for %edx (gcc regno = 1)
2288 3 for %ebx (gcc regno = 3)
2289 4 for %esp (gcc regno = 7)
2290 5 for %ebp (gcc regno = 6)
2291 6 for %esi (gcc regno = 4)
2292 7 for %edi (gcc regno = 5)
2293 The following three DWARF register numbers are never generated by
2294 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
2295 believes these numbers have these meanings.
2296 8 for %eip (no gcc equivalent)
2297 9 for %eflags (gcc regno = 17)
2298 10 for %trapno (no gcc equivalent)
2299 It is not at all clear how we should number the FP stack registers
2300 for the x86 architecture. If the version of SDB on x86/svr4 were
2301 a bit less brain dead with respect to floating-point then we would
2302 have a precedent to follow with respect to DWARF register numbers
2303 for x86 FP registers, but the SDB on x86/svr4 is so completely
2304 broken with respect to FP registers that it is hardly worth thinking
2305 of it as something to strive for compatibility with.
2306 The version of x86/svr4 SDB I have at the moment does (partially)
2307 seem to believe that DWARF register number 11 is associated with
2308 the x86 register %st(0), but that's about all. Higher DWARF
2309 register numbers don't seem to be associated with anything in
2310 particular, and even for DWARF regno 11, SDB only seems to under-
2311 stand that it should say that a variable lives in %st(0) (when
2312 asked via an `=' command) if we said it was in DWARF regno 11,
2313 but SDB still prints garbage when asked for the value of the
2314 variable in question (via a `/' command).
2315 (Also note that the labels SDB prints for various FP stack regs
2316 when doing an `x' command are all wrong.)
2317 Note that these problems generally don't affect the native SVR4
2318 C compiler because it doesn't allow the use of -O with -g and
2319 because when it is *not* optimizing, it allocates a memory
2320 location for each floating-point variable, and the memory
2321 location is what gets described in the DWARF AT_location
2322 attribute for the variable in question.
2323 Regardless of the severe mental illness of the x86/svr4 SDB, we
2324 do something sensible here and we use the following DWARF
2325 register numbers. Note that these are all stack-top-relative
2326 numbers:
2327 11 for %st(0) (gcc regno = 8)
2328 12 for %st(1) (gcc regno = 9)
2329 13 for %st(2) (gcc regno = 10)
2330 14 for %st(3) (gcc regno = 11)
2331 15 for %st(4) (gcc regno = 12)
2332 16 for %st(5) (gcc regno = 13)
2333 17 for %st(6) (gcc regno = 14)
2334 18 for %st(7) (gcc regno = 15)
2335 */
2336 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
2337 {
2338 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
2339 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
2340 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2341 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
2342 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
2343 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2344 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2345 };
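/* For reference: i386.h picks between the two default maps by word size,
   roughly
     #define DBX_REGISTER_NUMBER(N) \
       (TARGET_64BIT ? dbx64_register_map[(N)] : dbx_register_map[(N)])
   so gcc regno 1 (%edx) is emitted as DWARF column 2 in 32-bit code but
   as column 1 in 64-bit code.  */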
2347 /* Define parameter passing and return registers. */
2349 static int const x86_64_int_parameter_registers[6] =
2350 {
2351 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
2352 };
2354 static int const x86_64_ms_abi_int_parameter_registers[4] =
2355 {
2356 CX_REG, DX_REG, R8_REG, R9_REG
2357 };
2359 static int const x86_64_int_return_registers[4] =
2360 {
2361 AX_REG, DX_REG, DI_REG, SI_REG
2362 };
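/* Worked example of the tables above: for "void f (int a, int b, int c)"
   the SysV ABI passes a, b and c in %edi, %esi and %edx (the first three
   x86_64_int_parameter_registers), while the MS ABI uses %ecx, %edx and
   %r8d.  Integer results come back in %rax (and %rdx when a second
   register is needed), the first two entries of
   x86_64_int_return_registers.  */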
2364 /* Define the structure for the machine field in struct function. */
2366 struct GTY(()) stack_local_entry {
2367 unsigned short mode;
2370 struct stack_local_entry *next;
2371 };
2373 /* Structure describing stack frame layout.
2374 Stack grows downward:
2380 saved static chain if ix86_static_chain_on_stack
2382 saved frame pointer if frame_pointer_needed
2383 <- HARD_FRAME_POINTER
2389 <- sse_regs_save_offset
2392 [va_arg registers] |
2396 [padding2] | = to_allocate
2405 int outgoing_arguments_size;
2406 HOST_WIDE_INT frame;
2408 /* The offsets relative to ARG_POINTER. */
2409 HOST_WIDE_INT frame_pointer_offset;
2410 HOST_WIDE_INT hard_frame_pointer_offset;
2411 HOST_WIDE_INT stack_pointer_offset;
2412 HOST_WIDE_INT hfp_save_offset;
2413 HOST_WIDE_INT reg_save_offset;
2414 HOST_WIDE_INT sse_reg_save_offset;
2416 /* When save_regs_using_mov is set, emit prologue using
2417 move instead of push instructions. */
2418 bool save_regs_using_mov;
2419 };
2421 /* Which cpu are we scheduling for. */
2422 enum attr_cpu ix86_schedule;
2424 /* Which cpu are we optimizing for. */
2425 enum processor_type ix86_tune;
2427 /* Which instruction set architecture to use. */
2428 enum processor_type ix86_arch;
2430 /* True if the SSE prefetch instruction is not a NOP. */
2431 int x86_prefetch_sse;
2433 /* -mstackrealign option */
2434 static const char ix86_force_align_arg_pointer_string[]
2435 = "force_align_arg_pointer";
2437 static rtx (*ix86_gen_leave) (void);
2438 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
2439 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
2440 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
2441 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
2442 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
2443 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
2444 static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
2445 static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
2446 static rtx (*ix86_gen_probe_stack_range) (rtx, rtx, rtx);
2448 /* Preferred alignment for stack boundary in bits. */
2449 unsigned int ix86_preferred_stack_boundary;
2451 /* Alignment for incoming stack boundary in bits specified at
2452 command line. */
2453 static unsigned int ix86_user_incoming_stack_boundary;
2455 /* Default alignment for incoming stack boundary in bits. */
2456 static unsigned int ix86_default_incoming_stack_boundary;
2458 /* Alignment for incoming stack boundary in bits. */
2459 unsigned int ix86_incoming_stack_boundary;
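/* For reference: the ABI-mandated incoming boundary defaults to 4 bytes
   (32 bits) on ia32 and 16 bytes (128 bits) on x86-64;
   -mpreferred-stack-boundary and -mincoming-stack-boundary override the
   variables above.  */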
2461 /* Calling abi specific va_list type nodes. */
2462 static GTY(()) tree sysv_va_list_type_node;
2463 static GTY(()) tree ms_va_list_type_node;
2465 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
2466 char internal_label_prefix[16];
2467 int internal_label_prefix_len;
2469 /* Fence to use after loop using movnt. */
2470 tree x86_mfence;
2472 /* Register class used for passing given 64bit part of the argument.
2473 These represent classes as documented by the psABI, with the exception
2474 of the SSESF and SSEDF classes, which are basically the SSE class; gcc
2475 just uses SF- or DFmode moves instead of DImode moves to avoid
2476 reformatting penalties.
2477 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
2478 whenever possible (the upper half does contain padding). */
2479 enum x86_64_reg_class
2480 {
2481 X86_64_NO_CLASS,
2482 X86_64_INTEGER_CLASS,
2483 X86_64_INTEGERSI_CLASS,
2490 X86_64_COMPLEX_X87_CLASS,
2491 X86_64_MEMORY_CLASS
2492 };
2494 #define MAX_CLASSES 4
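/* Example of the classification these classes drive (psABI rules): an
   argument of type "struct { double d; long l; }" is split into two
   eightbytes classified SSE and INTEGER, so it travels in one SSE and
   one integer register.  MAX_CLASSES bounds how many eightbytes (and
   hence classes) a single argument may produce.  */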
2496 /* Table of constants used by fldpi, fldln2, etc. */
2497 static REAL_VALUE_TYPE ext_80387_constants_table [5];
2498 static bool ext_80387_constants_init = 0;
2501 static struct machine_function * ix86_init_machine_status (void);
2502 static rtx ix86_function_value (const_tree, const_tree, bool);
2503 static bool ix86_function_value_regno_p (const unsigned int);
2504 static unsigned int ix86_function_arg_boundary (enum machine_mode,
2506 static rtx ix86_static_chain (const_tree, bool);
2507 static int ix86_function_regparm (const_tree, const_tree);
2508 static void ix86_compute_frame_layout (struct ix86_frame *);
2509 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
2511 static void ix86_add_new_builtins (HOST_WIDE_INT);
2512 static tree ix86_canonical_va_list_type (tree);
2513 static void predict_jump (int);
2514 static unsigned int split_stack_prologue_scratch_regno (void);
2515 static bool i386_asm_output_addr_const_extra (FILE *, rtx);
2517 enum ix86_function_specific_strings
2518 {
2519 IX86_FUNCTION_SPECIFIC_ARCH,
2520 IX86_FUNCTION_SPECIFIC_TUNE,
2521 IX86_FUNCTION_SPECIFIC_MAX
2522 };
2524 static char *ix86_target_string (HOST_WIDE_INT, int, const char *,
2525 const char *, enum fpmath_unit, bool);
2526 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
2527 static void ix86_function_specific_save (struct cl_target_option *);
2528 static void ix86_function_specific_restore (struct cl_target_option *);
2529 static void ix86_function_specific_print (FILE *, int,
2530 struct cl_target_option *);
2531 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2532 static bool ix86_valid_target_attribute_inner_p (tree, char *[],
2533 struct gcc_options *);
2534 static bool ix86_can_inline_p (tree, tree);
2535 static void ix86_set_current_function (tree);
2536 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2538 static enum calling_abi ix86_function_abi (const_tree);
2541 #ifndef SUBTARGET32_DEFAULT_CPU
2542 #define SUBTARGET32_DEFAULT_CPU "i386"
2545 /* The svr4 ABI for the i386 says that records and unions are returned
2546 in memory. */
2547 #ifndef DEFAULT_PCC_STRUCT_RETURN
2548 #define DEFAULT_PCC_STRUCT_RETURN 1
2549 #endif
2551 /* Whether -mtune= or -march= were specified */
2552 static int ix86_tune_defaulted;
2553 static int ix86_arch_specified;
2555 /* Vectorization library interface and handlers. */
2556 static tree (*ix86_veclib_handler) (enum built_in_function, tree, tree);
2558 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2559 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2561 /* Processor target table, indexed by processor number */
2564 const struct processor_costs *cost; /* Processor costs */
2565 const int align_loop; /* Default alignments. */
2566 const int align_loop_max_skip;
2567 const int align_jump;
2568 const int align_jump_max_skip;
2569 const int align_func;
2570 };
2572 static const struct ptt processor_target_table[PROCESSOR_max] =
2573 {
2574 {&i386_cost, 4, 3, 4, 3, 4},
2575 {&i486_cost, 16, 15, 16, 15, 16},
2576 {&pentium_cost, 16, 7, 16, 7, 16},
2577 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2578 {&geode_cost, 0, 0, 0, 0, 0},
2579 {&k6_cost, 32, 7, 32, 7, 32},
2580 {&athlon_cost, 16, 7, 16, 7, 16},
2581 {&pentium4_cost, 0, 0, 0, 0, 0},
2582 {&k8_cost, 16, 7, 16, 7, 16},
2583 {&nocona_cost, 0, 0, 0, 0, 0},
2584 /* Core 2 32-bit. */
2585 {&generic32_cost, 16, 10, 16, 10, 16},
2586 /* Core 2 64-bit. */
2587 {&generic64_cost, 16, 10, 16, 10, 16},
2588 /* Core i7 32-bit. */
2589 {&generic32_cost, 16, 10, 16, 10, 16},
2590 /* Core i7 64-bit. */
2591 {&generic64_cost, 16, 10, 16, 10, 16},
2592 {&generic32_cost, 16, 7, 16, 7, 16},
2593 {&generic64_cost, 16, 10, 16, 10, 16},
2594 {&amdfam10_cost, 32, 24, 32, 7, 32},
2595 {&bdver1_cost, 32, 24, 32, 7, 32},
2596 {&bdver2_cost, 32, 24, 32, 7, 32},
2597 {&btver1_cost, 32, 24, 32, 7, 32},
2598 {&atom_cost, 16, 15, 16, 7, 16}
2599 };
2601 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2631 /* Return true if a red-zone is in use. */
2633 static inline bool
2634 ix86_using_red_zone (void)
2635 {
2636 return TARGET_RED_ZONE && !TARGET_64BIT_MS_ABI;
2637 }
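/* Background for the test above: the SysV x86-64 ABI reserves a 128-byte
   "red zone" below the stack pointer that signal handlers and interrupt
   frames must not clobber, so leaf code may use it without adjusting
   %rsp; the Microsoft x64 ABI provides no such guarantee.  */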
2639 /* Return a string that documents the current -m options. The caller is
2640 responsible for freeing the string. */
2642 static char *
2643 ix86_target_string (HOST_WIDE_INT isa, int flags, const char *arch,
2644 const char *tune, enum fpmath_unit fpmath,
2645 bool add_nl_p)
2646 {
2647 struct ix86_target_opts
2648 {
2649 const char *option; /* option string */
2650 HOST_WIDE_INT mask; /* isa mask options */
2651 };
2653 /* This table is ordered so that options like -msse4.2 that imply
2654 preceding options will match those first. */
2655 static struct ix86_target_opts isa_opts[] =
2656 {
2657 { "-m64", OPTION_MASK_ISA_64BIT },
2658 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2659 { "-mfma", OPTION_MASK_ISA_FMA },
2660 { "-mxop", OPTION_MASK_ISA_XOP },
2661 { "-mlwp", OPTION_MASK_ISA_LWP },
2662 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2663 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2664 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2665 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2666 { "-msse3", OPTION_MASK_ISA_SSE3 },
2667 { "-msse2", OPTION_MASK_ISA_SSE2 },
2668 { "-msse", OPTION_MASK_ISA_SSE },
2669 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2670 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2671 { "-mmmx", OPTION_MASK_ISA_MMX },
2672 { "-mabm", OPTION_MASK_ISA_ABM },
2673 { "-mbmi", OPTION_MASK_ISA_BMI },
2674 { "-mbmi2", OPTION_MASK_ISA_BMI2 },
2675 { "-mlzcnt", OPTION_MASK_ISA_LZCNT },
2676 { "-mtbm", OPTION_MASK_ISA_TBM },
2677 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2678 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2679 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2680 { "-maes", OPTION_MASK_ISA_AES },
2681 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2682 { "-mfsgsbase", OPTION_MASK_ISA_FSGSBASE },
2683 { "-mrdrnd", OPTION_MASK_ISA_RDRND },
2684 { "-mf16c", OPTION_MASK_ISA_F16C },
2688 static struct ix86_target_opts flag_opts[] =
2689 {
2690 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2691 { "-m80387", MASK_80387 },
2692 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2693 { "-malign-double", MASK_ALIGN_DOUBLE },
2694 { "-mcld", MASK_CLD },
2695 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2696 { "-mieee-fp", MASK_IEEE_FP },
2697 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2698 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2699 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2700 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2701 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2702 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2703 { "-mno-red-zone", MASK_NO_RED_ZONE },
2704 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2705 { "-mrecip", MASK_RECIP },
2706 { "-mrtd", MASK_RTD },
2707 { "-msseregparm", MASK_SSEREGPARM },
2708 { "-mstack-arg-probe", MASK_STACK_PROBE },
2709 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2710 { "-mvect8-ret-in-mem", MASK_VECT8_RETURNS },
2711 { "-m8bit-idiv", MASK_USE_8BIT_IDIV },
2712 { "-mvzeroupper", MASK_VZEROUPPER },
2713 { "-mavx256-split-unaligned-load", MASK_AVX256_SPLIT_UNALIGNED_LOAD},
2714 { "-mavx256-split-unaligned-store", MASK_AVX256_SPLIT_UNALIGNED_STORE},
2715 { "-mprefer-avx128", MASK_PREFER_AVX128},
2718 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2720 char isa_other[40];
2721 char target_other[40];
2723 size_t num = 0;
2724 size_t i, j;
2725 size_t len = 0;
2726 size_t line_len = 0;
2727 size_t sep_len;
2728 char *ret;
2729 char *ptr;
2730 memset (opts, '\0', sizeof (opts));
2732 /* Add -march= option. */
2735 opts[num][0] = "-march=";
2736 opts[num++][1] = arch;
2739 /* Add -mtune= option. */
2742 opts[num][0] = "-mtune=";
2743 opts[num++][1] = tune;
2746 /* Pick out the options in isa options. */
2747 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2749 if ((isa & isa_opts[i].mask) != 0)
2751 opts[num++][0] = isa_opts[i].option;
2752 isa &= ~ isa_opts[i].mask;
2756 if (isa && add_nl_p)
2758 opts[num++][0] = isa_other;
2759 sprintf (isa_other, "(other isa: %#" HOST_WIDE_INT_PRINT "x)",
2760 isa);
2763 /* Add flag options. */
2764 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2766 if ((flags & flag_opts[i].mask) != 0)
2768 opts[num++][0] = flag_opts[i].option;
2769 flags &= ~ flag_opts[i].mask;
2773 if (flags && add_nl_p)
2775 opts[num++][0] = target_other;
2776 sprintf (target_other, "(other flags: %#x)", flags);
2779 /* Add -fpmath= option. */
2782 opts[num][0] = "-mfpmath=";
2783 switch ((int) fpmath)
2784 {
2785 case FPMATH_387:
2786 opts[num++][1] = "387";
2787 break;
2789 case FPMATH_SSE:
2790 opts[num++][1] = "sse";
2791 break;
2793 case FPMATH_387 | FPMATH_SSE:
2794 opts[num++][1] = "sse+387";
2795 break;
2796 }
2806 gcc_assert (num < ARRAY_SIZE (opts));
2808 /* Size the string. */
2810 sep_len = (add_nl_p) ? 3 : 1;
2811 for (i = 0; i < num; i++)
2814 for (j = 0; j < 2; j++)
2816 len += strlen (opts[i][j]);
2819 /* Build the string. */
2820 ret = ptr = (char *) xmalloc (len);
2823 for (i = 0; i < num; i++)
2827 for (j = 0; j < 2; j++)
2828 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2835 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2843 for (j = 0; j < 2; j++)
2846 memcpy (ptr, opts[i][j], len2[j]);
2847 ptr += len2[j];
2848 line_len += len2[j];
2853 gcc_assert (ret + len >= ptr);
2855 return ret;
2856 }
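/* Illustrative call with hypothetical arguments: something like
     ix86_target_string (OPTION_MASK_ISA_SSE2, MASK_80387,
			 "i686", "generic", FPMATH_387, false)
   returns a malloc'd string along the lines of
   "-march=i686 -mtune=generic -msse2 -m80387 -mfpmath=387",
   options appearing in the order the loops above emit them.  */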
2858 /* Return true if profiling code should be emitted before the prologue,
2859 false otherwise.
2860 Note: for x86 with "hotfix", a sorry () is issued. */
2861 static bool
2862 ix86_profile_before_prologue (void)
2863 {
2864 return flag_fentry != 0;
2865 }
2867 /* Function that is callable from the debugger to print the current
2868 options. */
2869 static void
2870 ix86_debug_options (void)
2871 {
2872 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2873 ix86_arch_string, ix86_tune_string,
2874 ix86_fpmath, true);
2876 if (opts)
2877 {
2878 fprintf (stderr, "%s\n\n", opts);
2879 free (opts);
2880 }
2881 else
2882 fputs ("<no options>\n\n", stderr);
2884 }
2887 /* Override various settings based on options. If MAIN_ARGS_P, the
2888 options are from the command line, otherwise they are from
2889 attribute target. */
2891 static void
2892 ix86_option_override_internal (bool main_args_p)
2895 unsigned int ix86_arch_mask, ix86_tune_mask;
2896 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2901 #define PTA_3DNOW (HOST_WIDE_INT_1 << 0)
2902 #define PTA_3DNOW_A (HOST_WIDE_INT_1 << 1)
2903 #define PTA_64BIT (HOST_WIDE_INT_1 << 2)
2904 #define PTA_ABM (HOST_WIDE_INT_1 << 3)
2905 #define PTA_AES (HOST_WIDE_INT_1 << 4)
2906 #define PTA_AVX (HOST_WIDE_INT_1 << 5)
2907 #define PTA_BMI (HOST_WIDE_INT_1 << 6)
2908 #define PTA_CX16 (HOST_WIDE_INT_1 << 7)
2909 #define PTA_F16C (HOST_WIDE_INT_1 << 8)
2910 #define PTA_FMA (HOST_WIDE_INT_1 << 9)
2911 #define PTA_FMA4 (HOST_WIDE_INT_1 << 10)
2912 #define PTA_FSGSBASE (HOST_WIDE_INT_1 << 11)
2913 #define PTA_LWP (HOST_WIDE_INT_1 << 12)
2914 #define PTA_LZCNT (HOST_WIDE_INT_1 << 13)
2915 #define PTA_MMX (HOST_WIDE_INT_1 << 14)
2916 #define PTA_MOVBE (HOST_WIDE_INT_1 << 15)
2917 #define PTA_NO_SAHF (HOST_WIDE_INT_1 << 16)
2918 #define PTA_PCLMUL (HOST_WIDE_INT_1 << 17)
2919 #define PTA_POPCNT (HOST_WIDE_INT_1 << 18)
2920 #define PTA_PREFETCH_SSE (HOST_WIDE_INT_1 << 19)
2921 #define PTA_RDRND (HOST_WIDE_INT_1 << 20)
2922 #define PTA_SSE (HOST_WIDE_INT_1 << 21)
2923 #define PTA_SSE2 (HOST_WIDE_INT_1 << 22)
2924 #define PTA_SSE3 (HOST_WIDE_INT_1 << 23)
2925 #define PTA_SSE4_1 (HOST_WIDE_INT_1 << 24)
2926 #define PTA_SSE4_2 (HOST_WIDE_INT_1 << 25)
2927 #define PTA_SSE4A (HOST_WIDE_INT_1 << 26)
2928 #define PTA_SSSE3 (HOST_WIDE_INT_1 << 27)
2929 #define PTA_TBM (HOST_WIDE_INT_1 << 28)
2930 #define PTA_XOP (HOST_WIDE_INT_1 << 29)
2931 #define PTA_AVX2 (HOST_WIDE_INT_1 << 30)
2932 #define PTA_BMI2 (HOST_WIDE_INT_1 << 31)
2933 /* if this reaches 64, need to widen struct pta flags below */
2935 static struct pta
2936 {
2937 const char *const name; /* processor name or nickname. */
2938 const enum processor_type processor;
2939 const enum attr_cpu schedule;
2940 const unsigned HOST_WIDE_INT flags;
2941 }
2942 const processor_alias_table[] =
2943 {
2944 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2945 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2946 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2947 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2948 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2949 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2950 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2951 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2952 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2953 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2954 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2955 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2956 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2958 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2960 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2961 PTA_MMX | PTA_SSE | PTA_SSE2},
2962 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2963 PTA_MMX |PTA_SSE | PTA_SSE2},
2964 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2965 PTA_MMX | PTA_SSE | PTA_SSE2},
2966 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2967 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2968 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2969 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2970 | PTA_CX16 | PTA_NO_SAHF},
2971 {"core2", PROCESSOR_CORE2_64, CPU_CORE2,
2972 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2973 | PTA_SSSE3 | PTA_CX16},
2974 {"corei7", PROCESSOR_COREI7_64, CPU_COREI7,
2975 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2976 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16},
2977 {"corei7-avx", PROCESSOR_COREI7_64, CPU_COREI7,
2978 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2979 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX
2980 | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL},
2981 {"core-avx-i", PROCESSOR_COREI7_64, CPU_COREI7,
2982 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2983 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX
2984 | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL | PTA_FSGSBASE
2985 | PTA_RDRND | PTA_F16C},
2986 {"core-avx2", PROCESSOR_COREI7_64, CPU_COREI7,
2987 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2988 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX | PTA_AVX2
2989 | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL | PTA_FSGSBASE
2990 | PTA_RDRND | PTA_F16C | PTA_BMI | PTA_BMI2 | PTA_LZCNT
2991 | PTA_FMA | PTA_MOVBE},
2992 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2993 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2994 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2995 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2996 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2997 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2998 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2999 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
3000 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
3001 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3002 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
3003 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
3004 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
3005 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3006 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
3007 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3008 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
3009 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
3010 {"x86-64", PROCESSOR_K8, CPU_K8,
3011 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
3012 {"k8", PROCESSOR_K8, CPU_K8,
3013 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3014 | PTA_SSE2 | PTA_NO_SAHF},
3015 {"k8-sse3", PROCESSOR_K8, CPU_K8,
3016 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3017 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3018 {"opteron", PROCESSOR_K8, CPU_K8,
3019 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3020 | PTA_SSE2 | PTA_NO_SAHF},
3021 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
3022 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3023 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3024 {"athlon64", PROCESSOR_K8, CPU_K8,
3025 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3026 | PTA_SSE2 | PTA_NO_SAHF},
3027 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
3028 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3029 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3030 {"athlon-fx", PROCESSOR_K8, CPU_K8,
3031 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3032 | PTA_SSE2 | PTA_NO_SAHF},
3033 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3034 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3035 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3036 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3037 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3038 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3039 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
3040 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3041 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
3042 | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_FMA4
3043 | PTA_XOP | PTA_LWP},
3044 {"bdver2", PROCESSOR_BDVER2, CPU_BDVER2,
3045 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3046 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
3047 | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX
3048 | PTA_XOP | PTA_LWP | PTA_BMI | PTA_TBM | PTA_F16C
3050 {"btver1", PROCESSOR_BTVER1, CPU_GENERIC64,
3051 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3052 | PTA_SSSE3 | PTA_SSE4A |PTA_ABM | PTA_CX16},
3053 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
3054 0 /* flags are only used for -march switch. */ },
3055 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
3056 PTA_64BIT /* flags are only used for -march switch. */ },
3057 };
3059 /* -mrecip options. */
3060 static struct
3061 {
3062 const char *string; /* option name */
3063 unsigned int mask; /* mask bits to set */
3064 }
3065 const recip_options[] =
3066 {
3067 { "all", RECIP_MASK_ALL },
3068 { "none", RECIP_MASK_NONE },
3069 { "div", RECIP_MASK_DIV },
3070 { "sqrt", RECIP_MASK_SQRT },
3071 { "vec-div", RECIP_MASK_VEC_DIV },
3072 { "vec-sqrt", RECIP_MASK_VEC_SQRT },
3075 int const pta_size = ARRAY_SIZE (processor_alias_table);
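/* Sketch (assumption; the actual parsing loop is not shown above): each
   comma-separated token of -mrecip=... is looked up in recip_options[]
   and toggles the matching RECIP_MASK_* bits, e.g.

     for (i = 0; i < ARRAY_SIZE (recip_options); i++)
       if (!strcmp (str, recip_options[i].string))
	 recip_mask |= recip_options[i].mask;  */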
3077 /* Set up prefix/suffix so the error messages refer to either the command
3078 line argument, or the attribute(target). */
3087 prefix = "option(\"";
3092 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3093 SUBTARGET_OVERRIDE_OPTIONS;
3094 #endif
3096 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3097 SUBSUBTARGET_OVERRIDE_OPTIONS;
3098 #endif
3100 if (TARGET_X32)
3101 ix86_isa_flags |= OPTION_MASK_ISA_64BIT;
3103 /* -fPIC is the default for x86_64. */
3104 if (TARGET_MACHO && TARGET_64BIT)
3105 flag_pic = 2;
3107 /* Need to check -mtune=generic first. */
3108 if (ix86_tune_string)
3110 if (!strcmp (ix86_tune_string, "generic")
3111 || !strcmp (ix86_tune_string, "i686")
3112 /* As special support for cross compilers we read -mtune=native
3113 as -mtune=generic. With native compilers we won't see the
3114 -mtune=native, as it was changed by the driver. */
3115 || !strcmp (ix86_tune_string, "native"))
3118 ix86_tune_string = "generic64";
3120 ix86_tune_string = "generic32";
3122 /* If this call is for setting the option attribute, allow the
3123 generic32/generic64 that was previously set. */
3124 else if (!main_args_p
3125 && (!strcmp (ix86_tune_string, "generic32")
3126 || !strcmp (ix86_tune_string, "generic64")))
3128 else if (!strncmp (ix86_tune_string, "generic", 7))
3129 error ("bad value (%s) for %stune=%s %s",
3130 ix86_tune_string, prefix, suffix, sw);
3131 else if (!strcmp (ix86_tune_string, "x86-64"))
3132 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated; use "
3133 "%stune=k8%s or %stune=generic%s instead as appropriate",
3134 prefix, suffix, prefix, suffix, prefix, suffix);
3138 if (ix86_arch_string)
3139 ix86_tune_string = ix86_arch_string;
3140 if (!ix86_tune_string)
3142 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
3143 ix86_tune_defaulted = 1;
3146 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
3147 need to use a sensible tune option. */
3148 if (!strcmp (ix86_tune_string, "generic")
3149 || !strcmp (ix86_tune_string, "x86-64")
3150 || !strcmp (ix86_tune_string, "i686"))
3153 ix86_tune_string = "generic64";
3155 ix86_tune_string = "generic32";
3159 if (ix86_stringop_alg == rep_prefix_8_byte && !TARGET_64BIT)
3161 /* rep; movq isn't available in 32-bit code. */
3162 error ("-mstringop-strategy=rep_8byte not supported for 32-bit code");
3163 ix86_stringop_alg = no_stringop;
3166 if (!ix86_arch_string)
3167 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
3169 ix86_arch_specified = 1;
3171 if (!global_options_set.x_ix86_abi)
3172 ix86_abi = DEFAULT_ABI;
3174 if (global_options_set.x_ix86_cmodel)
3176 switch (ix86_cmodel)
3181 ix86_cmodel = CM_SMALL_PIC;
3183 error ("code model %qs not supported in the %s bit mode",
3190 ix86_cmodel = CM_MEDIUM_PIC;
3192 error ("code model %qs not supported in the %s bit mode",
3194 else if (TARGET_X32)
3195 error ("code model %qs not supported in x32 mode",
3202 ix86_cmodel = CM_LARGE_PIC;
3204 error ("code model %qs not supported in the %s bit mode",
3206 else if (TARGET_X32)
3207 error ("code model %qs not supported in x32 mode",
3213 error ("code model %s does not support PIC mode", "32");
3215 error ("code model %qs not supported in the %s bit mode",
3222 error ("code model %s does not support PIC mode", "kernel");
3223 ix86_cmodel = CM_32;
3226 error ("code model %qs not supported in the %s bit mode",
3236 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3237 use of rip-relative addressing. This eliminates fixups that
3238 would otherwise be needed if this object is to be placed in a
3239 DLL, and is essentially just as efficient as direct addressing. */
3240 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3241 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3242 else if (TARGET_64BIT)
3243 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3245 ix86_cmodel = CM_32;
3247 if (TARGET_MACHO && ix86_asm_dialect == ASM_INTEL)
3249 error ("-masm=intel not supported in this configuration");
3250 ix86_asm_dialect = ASM_ATT;
3252 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3253 sorry ("%i-bit mode not compiled in",
3254 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3256 for (i = 0; i < pta_size; i++)
3257 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3258 {
3259 ix86_schedule = processor_alias_table[i].schedule;
3260 ix86_arch = processor_alias_table[i].processor;
3261 /* Default cpu tuning to the architecture. */
3262 ix86_tune = ix86_arch;
3264 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3265 error ("CPU you selected does not support x86-64 "
3268 if (processor_alias_table[i].flags & PTA_MMX
3269 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3270 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3271 if (processor_alias_table[i].flags & PTA_3DNOW
3272 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3273 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3274 if (processor_alias_table[i].flags & PTA_3DNOW_A
3275 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3276 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3277 if (processor_alias_table[i].flags & PTA_SSE
3278 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3279 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3280 if (processor_alias_table[i].flags & PTA_SSE2
3281 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3282 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3283 if (processor_alias_table[i].flags & PTA_SSE3
3284 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3285 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3286 if (processor_alias_table[i].flags & PTA_SSSE3
3287 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3288 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3289 if (processor_alias_table[i].flags & PTA_SSE4_1
3290 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3291 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3292 if (processor_alias_table[i].flags & PTA_SSE4_2
3293 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3294 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3295 if (processor_alias_table[i].flags & PTA_AVX
3296 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3297 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3298 if (processor_alias_table[i].flags & PTA_AVX2
3299 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX2))
3300 ix86_isa_flags |= OPTION_MASK_ISA_AVX2;
3301 if (processor_alias_table[i].flags & PTA_FMA
3302 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3303 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3304 if (processor_alias_table[i].flags & PTA_SSE4A
3305 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3306 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3307 if (processor_alias_table[i].flags & PTA_FMA4
3308 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3309 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3310 if (processor_alias_table[i].flags & PTA_XOP
3311 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3312 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3313 if (processor_alias_table[i].flags & PTA_LWP
3314 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3315 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3316 if (processor_alias_table[i].flags & PTA_ABM
3317 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3318 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3319 if (processor_alias_table[i].flags & PTA_BMI
3320 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI))
3321 ix86_isa_flags |= OPTION_MASK_ISA_BMI;
3322 if (processor_alias_table[i].flags & (PTA_LZCNT | PTA_ABM)
3323 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LZCNT))
3324 ix86_isa_flags |= OPTION_MASK_ISA_LZCNT;
3325 if (processor_alias_table[i].flags & PTA_TBM
3326 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_TBM))
3327 ix86_isa_flags |= OPTION_MASK_ISA_TBM;
3328 if (processor_alias_table[i].flags & PTA_BMI2
3329 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI2))
3330 ix86_isa_flags |= OPTION_MASK_ISA_BMI2;
3331 if (processor_alias_table[i].flags & PTA_CX16
3332 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3333 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3334 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3335 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3336 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3337 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3338 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3339 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3340 if (processor_alias_table[i].flags & PTA_MOVBE
3341 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3342 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3343 if (processor_alias_table[i].flags & PTA_AES
3344 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3345 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3346 if (processor_alias_table[i].flags & PTA_PCLMUL
3347 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3348 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3349 if (processor_alias_table[i].flags & PTA_FSGSBASE
3350 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FSGSBASE))
3351 ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE;
3352 if (processor_alias_table[i].flags & PTA_RDRND
3353 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDRND))
3354 ix86_isa_flags |= OPTION_MASK_ISA_RDRND;
3355 if (processor_alias_table[i].flags & PTA_F16C
3356 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_F16C))
3357 ix86_isa_flags |= OPTION_MASK_ISA_F16C;
3358 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3359 x86_prefetch_sse = true;
3364 if (!strcmp (ix86_arch_string, "generic"))
3365 error ("generic CPU can be used only for %stune=%s %s",
3366 prefix, suffix, sw);
3367 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3368 error ("bad value (%s) for %sarch=%s %s",
3369 ix86_arch_string, prefix, suffix, sw);
3371 ix86_arch_mask = 1u << ix86_arch;
3372 for (i = 0; i < X86_ARCH_LAST; ++i)
3373 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
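/* The two lines above fill ix86_arch_features from a table indexed by
   feature, where each entry is a bitmask of the CPUs that have that
   feature.  A standalone sketch of the scheme, with made-up CPU and
   feature names:  */

#include <stdio.h>

enum { CPU_A, CPU_B, CPU_LAST };
enum { FEAT_X, FEAT_Y, FEAT_LAST };

static const unsigned initial_features[FEAT_LAST] = {
  /* FEAT_X */ (1u << CPU_A) | (1u << CPU_B),
  /* FEAT_Y */ (1u << CPU_B),
};

int
main (void)
{
  unsigned mask = 1u << CPU_A;  /* the selected CPU, as a one-hot mask */
  int features[FEAT_LAST];
  int i;

  for (i = 0; i < FEAT_LAST; i++)
    features[i] = !!(initial_features[i] & mask);

  printf ("X=%d Y=%d\n", features[FEAT_X], features[FEAT_Y]);  /* X=1 Y=0 */
  return 0;
}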
3375 for (i = 0; i < pta_size; i++)
3376 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3378 ix86_schedule = processor_alias_table[i].schedule;
3379 ix86_tune = processor_alias_table[i].processor;
3382 if (!(processor_alias_table[i].flags & PTA_64BIT))
3384 if (ix86_tune_defaulted)
3386 ix86_tune_string = "x86-64";
3387 for (i = 0; i < pta_size; i++)
3388 if (! strcmp (ix86_tune_string,
3389 processor_alias_table[i].name))
3391 ix86_schedule = processor_alias_table[i].schedule;
3392 ix86_tune = processor_alias_table[i].processor;
3395 error ("CPU you selected does not support x86-64 "
3401 /* Adjust tuning when compiling for 32-bit ABI. */
3404 case PROCESSOR_GENERIC64:
3405 ix86_tune = PROCESSOR_GENERIC32;
3406 ix86_schedule = CPU_PENTIUMPRO;
3409 case PROCESSOR_CORE2_64:
3410 ix86_tune = PROCESSOR_CORE2_32;
3413 case PROCESSOR_COREI7_64:
3414 ix86_tune = PROCESSOR_COREI7_32;
3421 /* Intel CPUs have always interpreted SSE prefetch instructions as
3422 NOPs, so we can enable SSE prefetch instructions even when
3423 -mtune (rather than -march) points us to a processor that has them.
3424 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3425 higher processors. */
3427 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3428 x86_prefetch_sse = true;
3432 if (ix86_tune_specified && i == pta_size)
3433 error ("bad value (%s) for %stune=%s %s",
3434 ix86_tune_string, prefix, suffix, sw);
3436 ix86_tune_mask = 1u << ix86_tune;
3437 for (i = 0; i < X86_TUNE_LAST; ++i)
3438 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3440 #ifndef USE_IX86_FRAME_POINTER
3441 #define USE_IX86_FRAME_POINTER 0
3444 #ifndef USE_X86_64_FRAME_POINTER
3445 #define USE_X86_64_FRAME_POINTER 0
3448 /* Set the default values for switches whose default depends on TARGET_64BIT
3449 in case they weren't overridden by command-line options. */
3452 if (optimize > 1 && !global_options_set.x_flag_zee)
3454 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3455 flag_omit_frame_pointer = !USE_X86_64_FRAME_POINTER;
3456 if (flag_asynchronous_unwind_tables == 2)
3457 flag_unwind_tables = flag_asynchronous_unwind_tables = 1;
3458 if (flag_pcc_struct_return == 2)
3459 flag_pcc_struct_return = 0;
3463 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3464 flag_omit_frame_pointer = !(USE_IX86_FRAME_POINTER || optimize_size);
3465 if (flag_asynchronous_unwind_tables == 2)
3466 flag_asynchronous_unwind_tables = !USE_IX86_FRAME_POINTER;
3467 if (flag_pcc_struct_return == 2)
3468 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
3472 ix86_cost = &ix86_size_cost;
3474 ix86_cost = processor_target_table[ix86_tune].cost;
3476 /* Arrange to set up i386_stack_locals for all functions. */
3477 init_machine_status = ix86_init_machine_status;
3479 /* Validate -mregparm= value. */
3480 if (global_options_set.x_ix86_regparm)
3483 warning (0, "-mregparm is ignored in 64-bit mode");
3484 if (ix86_regparm > REGPARM_MAX)
3486 error ("-mregparm=%d is not between 0 and %d",
3487 ix86_regparm, REGPARM_MAX);
3492 ix86_regparm = REGPARM_MAX;
3494 /* Default align_* from the processor table. */
3495 if (align_loops == 0)
3497 align_loops = processor_target_table[ix86_tune].align_loop;
3498 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3500 if (align_jumps == 0)
3502 align_jumps = processor_target_table[ix86_tune].align_jump;
3503 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3505 if (align_functions == 0)
3507 align_functions = processor_target_table[ix86_tune].align_func;
3510 /* Provide default for -mbranch-cost= value. */
3511 if (!global_options_set.x_ix86_branch_cost)
3512 ix86_branch_cost = ix86_cost->branch_cost;
3516 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3518 /* Enable by default the SSE and MMX builtins. Do allow the user to
3519 explicitly disable any of these. In particular, disabling SSE and
3520 MMX for kernel code is extremely useful. */
3521 if (!ix86_arch_specified)
3523 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3524 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3527 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3531 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3533 if (!ix86_arch_specified)
3535 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3537 /* The i386 ABI does not specify a red zone. It still makes sense to use it
3538 when the programmer takes care to keep the stack from being destroyed. */
3539 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3540 target_flags |= MASK_NO_RED_ZONE;
3543 /* Keep nonleaf frame pointers. */
3544 if (flag_omit_frame_pointer)
3545 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3546 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3547 flag_omit_frame_pointer = 1;
3549 /* If we're doing fast math, we don't care about comparison order
3550 wrt NaNs. This lets us use a shorter comparison sequence. */
3551 if (flag_finite_math_only)
3552 target_flags &= ~MASK_IEEE_FP;
3554 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3555 since the insns won't need emulation. */
3556 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3557 target_flags &= ~MASK_NO_FANCY_MATH_387;
3559 /* Likewise, if the target doesn't have a 387, or we've specified
3560 software floating point, don't use 387 inline intrinsics. */
3562 target_flags |= MASK_NO_FANCY_MATH_387;
3564 /* Turn on MMX builtins for -msse. */
3567 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3568 x86_prefetch_sse = true;
3571 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3572 if (TARGET_SSE4_2 || TARGET_ABM)
3573 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3575 /* Turn on lzcnt instruction for -mabm. */
3577 ix86_isa_flags |= OPTION_MASK_ISA_LZCNT & ~ix86_isa_flags_explicit;
3579 /* Validate -mpreferred-stack-boundary= value or default it to
3580 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3581 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3582 if (global_options_set.x_ix86_preferred_stack_boundary_arg)
3584 int min = (TARGET_64BIT ? 4 : 2);
3585 int max = (TARGET_SEH ? 4 : 12);
3587 if (ix86_preferred_stack_boundary_arg < min
3588 || ix86_preferred_stack_boundary_arg > max)
3591 error ("-mpreferred-stack-boundary is not supported "
3594 error ("-mpreferred-stack-boundary=%d is not between %d and %d",
3595 ix86_preferred_stack_boundary_arg, min, max);
3598 ix86_preferred_stack_boundary
3599 = (1 << ix86_preferred_stack_boundary_arg) * BITS_PER_UNIT;
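/* The option argument above is a log2 byte alignment: it is range-checked
   and then converted to bits with (1 << arg) * BITS_PER_UNIT.  A
   standalone sketch of the same computation, hard-coding BITS_PER_UNIT
   as 8:  */

#include <stdio.h>

static int
boundary_bits (int log2_bytes, int min, int max)
{
  if (log2_bytes < min || log2_bytes > max)
    {
      fprintf (stderr, "boundary %d is not between %d and %d\n",
               log2_bytes, min, max);
      return -1;
    }
  return (1 << log2_bytes) * 8;  /* bytes -> bits */
}

int
main (void)
{
  /* -mpreferred-stack-boundary=4 in 64-bit mode: 16 bytes = 128 bits.  */
  printf ("%d\n", boundary_bits (4, 4, 12));  /* prints 128 */
  return 0;
}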
3602 /* Set the default value for -mstackrealign. */
3603 if (ix86_force_align_arg_pointer == -1)
3604 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3606 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3608 /* Validate -mincoming-stack-boundary= value or default it to
3609 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3610 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3611 if (global_options_set.x_ix86_incoming_stack_boundary_arg)
3613 if (ix86_incoming_stack_boundary_arg < (TARGET_64BIT ? 4 : 2)
3614 || ix86_incoming_stack_boundary_arg > 12)
3615 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3616 ix86_incoming_stack_boundary_arg, TARGET_64BIT ? 4 : 2);
3619 ix86_user_incoming_stack_boundary
3620 = (1 << ix86_incoming_stack_boundary_arg) * BITS_PER_UNIT;
3621 ix86_incoming_stack_boundary
3622 = ix86_user_incoming_stack_boundary;
3626 /* Accept -msseregparm only if at least SSE support is enabled. */
3627 if (TARGET_SSEREGPARM
3629 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3631 if (global_options_set.x_ix86_fpmath)
3633 if (ix86_fpmath & FPMATH_SSE)
3637 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3638 ix86_fpmath = FPMATH_387;
3640 else if ((ix86_fpmath & FPMATH_387) && !TARGET_80387)
3642 warning (0, "387 instruction set disabled, using SSE arithmetic");
3643 ix86_fpmath = FPMATH_SSE;
3648 ix86_fpmath = TARGET_FPMATH_DEFAULT;
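/* The fpmath validation above falls back to the other FP unit, with a
   warning, when the requested one is unavailable.  A standalone sketch
   of that fallback; the enum values here are illustrative:  */

#include <stdio.h>
#include <stdbool.h>

enum fpmath { FPMATH_387 = 1, FPMATH_SSE = 2 };

static enum fpmath
resolve_fpmath (enum fpmath requested, bool have_sse, bool have_387)
{
  if ((requested & FPMATH_SSE) && !have_sse)
    {
      fprintf (stderr, "warning: SSE disabled, using 387 arithmetic\n");
      return FPMATH_387;
    }
  if ((requested & FPMATH_387) && !have_387)
    {
      fprintf (stderr, "warning: 387 disabled, using SSE arithmetic\n");
      return FPMATH_SSE;
    }
  return requested;
}

int
main (void)
{
  printf ("%d\n", resolve_fpmath (FPMATH_SSE, false, true));  /* 1 = 387 */
  return 0;
}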
3650 /* If the i387 is disabled, then do not return values in it. */
3652 target_flags &= ~MASK_FLOAT_RETURNS;
3654 /* Use an external vectorized library when vectorizing intrinsics. */
3655 if (global_options_set.x_ix86_veclibabi_type)
3656 switch (ix86_veclibabi_type)
3658 case ix86_veclibabi_type_svml:
3659 ix86_veclib_handler = ix86_veclibabi_svml;
3662 case ix86_veclibabi_type_acml:
3663 ix86_veclib_handler = ix86_veclibabi_acml;
3670 if ((!USE_IX86_FRAME_POINTER
3671 || (x86_accumulate_outgoing_args & ix86_tune_mask))
3672 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3674 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3676 /* ??? Unwind info is not correct around the CFG unless either a frame
3677 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3678 unwind info generation to be aware of the CFG and propagating states around edges. */
3680 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3681 || flag_exceptions || flag_non_call_exceptions)
3682 && flag_omit_frame_pointer
3683 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3685 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3686 warning (0, "unwind tables currently require either a frame pointer "
3687 "or %saccumulate-outgoing-args%s for correctness",
3689 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3692 /* If stack probes are required, the space used for large function
3693 arguments on the stack must also be probed, so enable
3694 -maccumulate-outgoing-args so this happens in the prologue. */
3695 if (TARGET_STACK_PROBE
3696 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3698 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3699 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3700 "for correctness", prefix, suffix);
3701 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3704 /* For sane SSE instruction set generation we need the fcomi instruction.
3705 It is safe to enable all CMOVE instructions. Also, the RDRAND intrinsic
3706 expands to a sequence that includes a conditional move. */
3707 if (TARGET_SSE || TARGET_RDRND)
3710 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3713 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3714 p = strchr (internal_label_prefix, 'X');
3715 internal_label_prefix_len = p - internal_label_prefix;
3719 /* When the scheduling description is not available, disable the scheduler pass
3720 so that it won't slow down compilation and make x87 code slower. */
3721 if (!TARGET_SCHEDULE)
3722 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3724 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3725 ix86_cost->simultaneous_prefetches,
3726 global_options.x_param_values,
3727 global_options_set.x_param_values);
3728 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, ix86_cost->prefetch_block,
3729 global_options.x_param_values,
3730 global_options_set.x_param_values);
3731 maybe_set_param_value (PARAM_L1_CACHE_SIZE, ix86_cost->l1_cache_size,
3732 global_options.x_param_values,
3733 global_options_set.x_param_values);
3734 maybe_set_param_value (PARAM_L2_CACHE_SIZE, ix86_cost->l2_cache_size,
3735 global_options.x_param_values,
3736 global_options_set.x_param_values);
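/* maybe_set_param_value only installs the tuning-derived default when the
   user has not set the parameter.  A standalone sketch of the pattern;
   struct param and its fields are illustrative, not GCC's:  */

#include <stdio.h>
#include <stdbool.h>

struct param { int value; bool set_by_user; };

static void
maybe_set_param (struct param *p, int default_value)
{
  if (!p->set_by_user)
    p->value = default_value;
}

int
main (void)
{
  struct param l1_line = { 0, false };
  struct param l1_size = { 16, true };  /* user passed an explicit value */

  maybe_set_param (&l1_line, 64);  /* from the CPU cost table, say */
  maybe_set_param (&l1_size, 32);

  printf ("line=%d size=%d\n", l1_line.value, l1_size.value);  /* 64 16 */
  return 0;
}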
3738 /* Enable software prefetching at -O3 for CPUs where prefetching is helpful. */
3739 if (flag_prefetch_loop_arrays < 0
3742 && TARGET_SOFTWARE_PREFETCHING_BENEFICIAL)
3743 flag_prefetch_loop_arrays = 1;
3745 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3746 can be optimized to ap = __builtin_next_arg (0). */
3747 if (!TARGET_64BIT && !flag_split_stack)
3748 targetm.expand_builtin_va_start = NULL;
3752 ix86_gen_leave = gen_leave_rex64;
3753 ix86_gen_add3 = gen_adddi3;
3754 ix86_gen_sub3 = gen_subdi3;
3755 ix86_gen_sub3_carry = gen_subdi3_carry;
3756 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3757 ix86_gen_monitor = gen_sse3_monitor64;
3758 ix86_gen_andsp = gen_anddi3;
3759 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_di;
3760 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
3761 ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
3765 ix86_gen_leave = gen_leave;
3766 ix86_gen_add3 = gen_addsi3;
3767 ix86_gen_sub3 = gen_subsi3;
3768 ix86_gen_sub3_carry = gen_subsi3_carry;
3769 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3770 ix86_gen_monitor = gen_sse3_monitor;
3771 ix86_gen_andsp = gen_andsi3;
3772 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_si;
3773 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
3774 ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
3778 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3780 target_flags |= MASK_CLD & ~target_flags_explicit;
3783 if (!TARGET_64BIT && flag_pic)
3785 if (flag_fentry > 0)
3786 sorry ("-mfentry isn%'t supported for 32-bit in combination "
3790 else if (TARGET_SEH)
3792 if (flag_fentry == 0)
3793 sorry ("-mno-fentry isn%'t compatible with SEH");
3796 else if (flag_fentry < 0)
3798 #if defined(PROFILE_BEFORE_PROLOGUE)
3807 /* When not optimizing for size, enable the vzeroupper optimization for
3808 TARGET_AVX with -fexpensive-optimizations, and split 32-byte
3809 AVX unaligned loads/stores. */
3812 if (flag_expensive_optimizations
3813 && !(target_flags_explicit & MASK_VZEROUPPER))
3814 target_flags |= MASK_VZEROUPPER;
3815 if ((x86_avx256_split_unaligned_load & ix86_tune_mask)
3816 && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
3817 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
3818 if ((x86_avx256_split_unaligned_store & ix86_tune_mask)
3819 && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
3820 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
3821 /* Enable 128-bit AVX instruction generation for the auto-vectorizer. */
3822 if (TARGET_AVX128_OPTIMAL && !(target_flags_explicit & MASK_PREFER_AVX128))
3823 target_flags |= MASK_PREFER_AVX128;
3828 /* Disable vzeroupper pass if TARGET_AVX is disabled. */
3829 target_flags &= ~MASK_VZEROUPPER;
3832 if (ix86_recip_name)
3834 char *p = ASTRDUP (ix86_recip_name);
3836 unsigned int mask, i;
3839 while ((q = strtok (p, ",")) != NULL)
3850 if (!strcmp (q, "default"))
3851 mask = RECIP_MASK_ALL;
3854 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3855 if (!strcmp (q, recip_options[i].string))
3857 mask = recip_options[i].mask;
3861 if (i == ARRAY_SIZE (recip_options))
3863 error ("unknown option for -mrecip=%s", q);
3865 mask = RECIP_MASK_NONE;
3869 recip_mask_explicit |= mask;
3871 recip_mask &= ~mask;
3878 recip_mask |= RECIP_MASK_ALL & ~recip_mask_explicit;
3879 else if (target_flags_explicit & MASK_RECIP)
3880 recip_mask &= ~(RECIP_MASK_ALL & ~recip_mask_explicit);
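/* The -mrecip= handling above walks a comma-separated list with strtok,
   accepting "default", a table of names, and a "no-" prefix that clears
   bits instead of setting them.  A standalone sketch with a two-entry
   illustrative option table:  */

#include <stdio.h>
#include <string.h>

#define RECIP_DIV  (1u << 0)
#define RECIP_SQRT (1u << 1)
#define RECIP_ALL  (RECIP_DIV | RECIP_SQRT)

static const struct { const char *name; unsigned mask; } opts[] = {
  { "div",  RECIP_DIV  },
  { "sqrt", RECIP_SQRT },
};

static unsigned
parse_recip (char *list)
{
  unsigned result = 0;
  char *q;

  for (q = strtok (list, ","); q; q = strtok (NULL, ","))
    {
      int invert = strncmp (q, "no-", 3) == 0;
      const char *name = q + (invert ? 3 : 0);
      unsigned mask = 0;
      size_t i = 0;

      if (!strcmp (name, "default"))
        mask = RECIP_ALL;
      else
        {
          for (i = 0; i < sizeof opts / sizeof opts[0]; i++)
            if (!strcmp (name, opts[i].name))
              {
                mask = opts[i].mask;
                break;
              }
          if (i == sizeof opts / sizeof opts[0])
            fprintf (stderr, "unknown option %s\n", name);
        }

      if (invert)
        result &= ~mask;
      else
        result |= mask;
    }
  return result;
}

int
main (void)
{
  char buf[] = "default,no-sqrt";
  printf ("mask=%u\n", parse_recip (buf));  /* mask=1: div on, sqrt off */
  return 0;
}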
3882 /* Save the initial options in case the user does function-specific options. */
3885 target_option_default_node = target_option_current_node
3886 = build_target_option_node ();
3889 /* Return TRUE if VAL is passed in a register in a 256-bit AVX mode. */
3892 function_pass_avx256_p (const_rtx val)
3897 if (REG_P (val) && VALID_AVX256_REG_MODE (GET_MODE (val)))
3900 if (GET_CODE (val) == PARALLEL)
3905 for (i = XVECLEN (val, 0) - 1; i >= 0; i--)
3907 r = XVECEXP (val, 0, i);
3908 if (GET_CODE (r) == EXPR_LIST
3910 && REG_P (XEXP (r, 0))
3911 && (GET_MODE (XEXP (r, 0)) == OImode
3912 || VALID_AVX256_REG_MODE (GET_MODE (XEXP (r, 0)))))
3920 /* Implement the TARGET_OPTION_OVERRIDE hook. */
3923 ix86_option_override (void)
3925 ix86_option_override_internal (true);
3928 /* Update register usage after having seen the compiler flags. */
3931 ix86_conditional_register_usage (void)
3936 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3938 if (fixed_regs[i] > 1)
3939 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3940 if (call_used_regs[i] > 1)
3941 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3944 /* The PIC register, if it exists, is fixed. */
3945 j = PIC_OFFSET_TABLE_REGNUM;
3946 if (j != INVALID_REGNUM)
3947 fixed_regs[j] = call_used_regs[j] = 1;
3949 /* The 64-bit MS_ABI changes the set of call-used registers. */
3950 if (TARGET_64BIT_MS_ABI)
3952 call_used_regs[SI_REG] = 0;
3953 call_used_regs[DI_REG] = 0;
3954 call_used_regs[XMM6_REG] = 0;
3955 call_used_regs[XMM7_REG] = 0;
3956 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3957 call_used_regs[i] = 0;
3960 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3961 other call-clobbered regs for 64-bit. */
3964 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3966 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3967 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3968 && call_used_regs[i])
3969 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3972 /* If MMX is disabled, squash the registers. */
3974 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3975 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3976 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3978 /* If SSE is disabled, squash the registers. */
3980 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3981 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3982 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3984 /* If the FPU is disabled, squash the registers. */
3985 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3986 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3987 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3988 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3990 /* If 32-bit, squash the 64-bit registers. */
3993 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3995 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
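/* The "squash the registers" idiom above removes a register class from
   play by making every member fixed, call-used, and nameless.  A
   standalone sketch with a tiny illustrative register file:  */

#include <stdio.h>

#define NREGS 4

static int fixed_regs[NREGS];
static int call_used_regs[NREGS];
static const char *reg_names[NREGS] = { "xmm0", "xmm1", "mm0", "mm1" };

static void
squash (int regno)
{
  fixed_regs[regno] = call_used_regs[regno] = 1;
  reg_names[regno] = "";  /* nameless: the register can't be printed */
}

int
main (void)
{
  int i;

  /* Pretend MMX is disabled: squash the last two registers.  */
  squash (2);
  squash (3);

  for (i = 0; i < NREGS; i++)
    printf ("%d: '%s' fixed=%d\n", i, reg_names[i], fixed_regs[i]);
  return 0;
}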
4001 /* Save the current options. */
4004 ix86_function_specific_save (struct cl_target_option *ptr)
4006 ptr->arch = ix86_arch;
4007 ptr->schedule = ix86_schedule;
4008 ptr->tune = ix86_tune;
4009 ptr->branch_cost = ix86_branch_cost;
4010 ptr->tune_defaulted = ix86_tune_defaulted;
4011 ptr->arch_specified = ix86_arch_specified;
4012 ptr->x_ix86_isa_flags_explicit = ix86_isa_flags_explicit;
4013 ptr->ix86_target_flags_explicit = target_flags_explicit;
4014 ptr->x_recip_mask_explicit = recip_mask_explicit;
4016 /* The fields are char but the variables are not; make sure the
4017 values fit in the fields. */
4018 gcc_assert (ptr->arch == ix86_arch);
4019 gcc_assert (ptr->schedule == ix86_schedule);
4020 gcc_assert (ptr->tune == ix86_tune);
4021 gcc_assert (ptr->branch_cost == ix86_branch_cost);
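/* The asserts above guard the narrowing stores into the save structure:
   the fields are chars to keep the structure small, and any value that
   does not round-trip trips an assert.  A standalone sketch; the struct
   and variable names are illustrative:  */

#include <assert.h>

struct saved_opts { unsigned char arch, tune; };

static int cur_arch = 7, cur_tune = 12;

static void
save_opts (struct saved_opts *p)
{
  p->arch = cur_arch;
  p->tune = cur_tune;

  /* The fields are char but the variables are int; make sure the
     values survived the narrowing.  */
  assert (p->arch == cur_arch);
  assert (p->tune == cur_tune);
}

int
main (void)
{
  struct saved_opts s;
  save_opts (&s);
  return 0;
}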
4024 /* Restore the current options. */
4027 ix86_function_specific_restore (struct cl_target_option *ptr)
4029 enum processor_type old_tune = ix86_tune;
4030 enum processor_type old_arch = ix86_arch;
4031 unsigned int ix86_arch_mask, ix86_tune_mask;
4034 ix86_arch = (enum processor_type) ptr->arch;
4035 ix86_schedule = (enum attr_cpu) ptr->schedule;
4036 ix86_tune = (enum processor_type) ptr->tune;
4037 ix86_branch_cost = ptr->branch_cost;
4038 ix86_tune_defaulted = ptr->tune_defaulted;
4039 ix86_arch_specified = ptr->arch_specified;
4040 ix86_isa_flags_explicit = ptr->x_ix86_isa_flags_explicit;
4041 target_flags_explicit = ptr->ix86_target_flags_explicit;
4042 recip_mask_explicit = ptr->x_recip_mask_explicit;
4044 /* Recreate the arch feature tests if the arch changed. */
4045 if (old_arch != ix86_arch)
4047 ix86_arch_mask = 1u << ix86_arch;
4048 for (i = 0; i < X86_ARCH_LAST; ++i)
4049 ix86_arch_features[i]
4050 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
4053 /* Recreate the tune optimization tests. */
4054 if (old_tune != ix86_tune)
4056 ix86_tune_mask = 1u << ix86_tune;
4057 for (i = 0; i < X86_TUNE_LAST; ++i)
4058 ix86_tune_features[i]
4059 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
4063 /* Print the current options. */
4066 ix86_function_specific_print (FILE *file, int indent,
4067 struct cl_target_option *ptr)
4070 = ix86_target_string (ptr->x_ix86_isa_flags, ptr->x_target_flags,
4071 NULL, NULL, ptr->x_ix86_fpmath, false);
4073 fprintf (file, "%*sarch = %d (%s)\n",
4076 ((ptr->arch < TARGET_CPU_DEFAULT_max)
4077 ? cpu_names[ptr->arch]
4080 fprintf (file, "%*stune = %d (%s)\n",
4083 ((ptr->tune < TARGET_CPU_DEFAULT_max)
4084 ? cpu_names[ptr->tune]
4087 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
4091 fprintf (file, "%*s%s\n", indent, "", target_string);
4092 free (target_string);
4097 /* Inner function to process the attribute((target(...))), take an argument and
4098 set the current options from the argument. If we have a list, recursively go over the list. */
4102 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[],
4103 struct gcc_options *enum_opts_set)
4108 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
4109 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
4110 #define IX86_ATTR_ENUM(S,O) { S, sizeof (S)-1, ix86_opt_enum, O, 0 }
4111 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
4112 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
4128 enum ix86_opt_type type;
4133 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
4134 IX86_ATTR_ISA ("abm", OPT_mabm),
4135 IX86_ATTR_ISA ("bmi", OPT_mbmi),
4136 IX86_ATTR_ISA ("bmi2", OPT_mbmi2),
4137 IX86_ATTR_ISA ("lzcnt", OPT_mlzcnt),
4138 IX86_ATTR_ISA ("tbm", OPT_mtbm),
4139 IX86_ATTR_ISA ("aes", OPT_maes),
4140 IX86_ATTR_ISA ("avx", OPT_mavx),
4141 IX86_ATTR_ISA ("avx2", OPT_mavx2),
4142 IX86_ATTR_ISA ("mmx", OPT_mmmx),
4143 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
4144 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
4145 IX86_ATTR_ISA ("sse", OPT_msse),
4146 IX86_ATTR_ISA ("sse2", OPT_msse2),
4147 IX86_ATTR_ISA ("sse3", OPT_msse3),
4148 IX86_ATTR_ISA ("sse4", OPT_msse4),
4149 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
4150 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
4151 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
4152 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
4153 IX86_ATTR_ISA ("fma4", OPT_mfma4),
4154 IX86_ATTR_ISA ("fma", OPT_mfma),
4155 IX86_ATTR_ISA ("xop", OPT_mxop),
4156 IX86_ATTR_ISA ("lwp", OPT_mlwp),
4157 IX86_ATTR_ISA ("fsgsbase", OPT_mfsgsbase),
4158 IX86_ATTR_ISA ("rdrnd", OPT_mrdrnd),
4159 IX86_ATTR_ISA ("f16c", OPT_mf16c),
4162 IX86_ATTR_ENUM ("fpmath=", OPT_mfpmath_),
4164 /* string options */
4165 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
4166 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
4169 IX86_ATTR_YES ("cld",
4173 IX86_ATTR_NO ("fancy-math-387",
4174 OPT_mfancy_math_387,
4175 MASK_NO_FANCY_MATH_387),
4177 IX86_ATTR_YES ("ieee-fp",
4181 IX86_ATTR_YES ("inline-all-stringops",
4182 OPT_minline_all_stringops,
4183 MASK_INLINE_ALL_STRINGOPS),
4185 IX86_ATTR_YES ("inline-stringops-dynamically",
4186 OPT_minline_stringops_dynamically,
4187 MASK_INLINE_STRINGOPS_DYNAMICALLY),
4189 IX86_ATTR_NO ("align-stringops",
4190 OPT_mno_align_stringops,
4191 MASK_NO_ALIGN_STRINGOPS),
4193 IX86_ATTR_YES ("recip",
4199 /* If this is a list, recurse to get the options. */
4200 if (TREE_CODE (args) == TREE_LIST)
4204 for (; args; args = TREE_CHAIN (args))
4205 if (TREE_VALUE (args)
4206 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args),
4207 p_strings, enum_opts_set))
4213 else if (TREE_CODE (args) != STRING_CST)
4216 /* Handle multiple arguments separated by commas. */
4217 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
4219 while (next_optstr && *next_optstr != '\0')
4221 char *p = next_optstr;
4223 char *comma = strchr (next_optstr, ',');
4224 const char *opt_string;
4225 size_t len, opt_len;
4230 enum ix86_opt_type type = ix86_opt_unknown;
4236 len = comma - next_optstr;
4237 next_optstr = comma + 1;
4245 /* Recognize no-xxx. */
4246 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
4255 /* Find the option. */
4258 for (i = 0; i < ARRAY_SIZE (attrs); i++)
4260 type = attrs[i].type;
4261 opt_len = attrs[i].len;
4262 if (ch == attrs[i].string[0]
4263 && ((type != ix86_opt_str && type != ix86_opt_enum)
4266 && memcmp (p, attrs[i].string, opt_len) == 0)
4269 mask = attrs[i].mask;
4270 opt_string = attrs[i].string;
4275 /* Process the option. */
4278 error ("attribute(target(\"%s\")) is unknown", orig_p);
4282 else if (type == ix86_opt_isa)
4284 struct cl_decoded_option decoded;
4286 generate_option (opt, NULL, opt_set_p, CL_TARGET, &decoded);
4287 ix86_handle_option (&global_options, &global_options_set,
4288 &decoded, input_location);
4291 else if (type == ix86_opt_yes || type == ix86_opt_no)
4293 if (type == ix86_opt_no)
4294 opt_set_p = !opt_set_p;
4297 target_flags |= mask;
4299 target_flags &= ~mask;
4302 else if (type == ix86_opt_str)
4306 error ("option(\"%s\") was already specified", opt_string);
4310 p_strings[opt] = xstrdup (p + opt_len);
4313 else if (type == ix86_opt_enum)
4318 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
4320 set_option (&global_options, enum_opts_set, opt, value,
4321 p + opt_len, DK_UNSPECIFIED, input_location,
4325 error ("attribute(target(\"%s\")) is unknown", orig_p);
4337 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
4340 ix86_valid_target_attribute_tree (tree args)
4342 const char *orig_arch_string = ix86_arch_string;
4343 const char *orig_tune_string = ix86_tune_string;
4344 enum fpmath_unit orig_fpmath_set = global_options_set.x_ix86_fpmath;
4345 int orig_tune_defaulted = ix86_tune_defaulted;
4346 int orig_arch_specified = ix86_arch_specified;
4347 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL };
4350 struct cl_target_option *def
4351 = TREE_TARGET_OPTION (target_option_default_node);
4352 struct gcc_options enum_opts_set;
4354 memset (&enum_opts_set, 0, sizeof (enum_opts_set));
4356 /* Process each of the options on the chain. */
4357 if (! ix86_valid_target_attribute_inner_p (args, option_strings,
4361 /* If the changed options are different from the default, rerun
4362 ix86_option_override_internal, and then save the options away.
4363 The string options are attribute options, and will be undone
4364 when we copy the save structure. */
4365 if (ix86_isa_flags != def->x_ix86_isa_flags
4366 || target_flags != def->x_target_flags
4367 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
4368 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
4369 || enum_opts_set.x_ix86_fpmath)
4371 /* If we are using the default tune= or arch=, undo the string assigned,
4372 and use the default. */
4373 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
4374 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
4375 else if (!orig_arch_specified)
4376 ix86_arch_string = NULL;
4378 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
4379 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
4380 else if (orig_tune_defaulted)
4381 ix86_tune_string = NULL;
4383 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
4384 if (enum_opts_set.x_ix86_fpmath)
4385 global_options_set.x_ix86_fpmath = (enum fpmath_unit) 1;
4386 else if (!TARGET_64BIT && TARGET_SSE)
4388 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
4389 global_options_set.x_ix86_fpmath = (enum fpmath_unit) 1;
4392 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
4393 ix86_option_override_internal (false);
4395 /* Add any builtin functions with the new isa if any. */
4396 ix86_add_new_builtins (ix86_isa_flags);
4398 /* Save the current options unless we are validating options for #pragma. */
4400 t = build_target_option_node ();
4402 ix86_arch_string = orig_arch_string;
4403 ix86_tune_string = orig_tune_string;
4404 global_options_set.x_ix86_fpmath = orig_fpmath_set;
4406 /* Free up the memory allocated to hold the strings. */
4407 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4408 free (option_strings[i]);
4414 /* Hook to validate attribute((target("string"))). */
4417 ix86_valid_target_attribute_p (tree fndecl,
4418 tree ARG_UNUSED (name),
4420 int ARG_UNUSED (flags))
4422 struct cl_target_option cur_target;
4424 tree old_optimize = build_optimization_node ();
4425 tree new_target, new_optimize;
4426 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4428 /* If the function changed the optimization levels as well as setting target
4429 options, start with the optimizations specified. */
4430 if (func_optimize && func_optimize != old_optimize)
4431 cl_optimization_restore (&global_options,
4432 TREE_OPTIMIZATION (func_optimize));
4434 /* The target attributes may also change some optimization flags, so update
4435 the optimization options if necessary. */
4436 cl_target_option_save (&cur_target, &global_options);
4437 new_target = ix86_valid_target_attribute_tree (args);
4438 new_optimize = build_optimization_node ();
4445 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4447 if (old_optimize != new_optimize)
4448 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4451 cl_target_option_restore (&global_options, &cur_target);
4453 if (old_optimize != new_optimize)
4454 cl_optimization_restore (&global_options,
4455 TREE_OPTIMIZATION (old_optimize));
4461 /* Hook to determine if one function can safely inline another. */
4464 ix86_can_inline_p (tree caller, tree callee)
4467 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4468 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4470 /* If callee has no option attributes, then it is ok to inline. */
4474 /* If the caller has no option attributes but the callee does, then it is not ok to inline. */
4476 else if (!caller_tree)
4481 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4482 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4484 /* The callee's ISA options should be a subset of the caller's, i.e. an SSE4
4485 function can inline an SSE2 function, but an SSE2 function can't inline an SSE4 function. */
4487 if ((caller_opts->x_ix86_isa_flags & callee_opts->x_ix86_isa_flags)
4488 != callee_opts->x_ix86_isa_flags)
4491 /* See if we have the same non-isa options. */
4492 else if (caller_opts->x_target_flags != callee_opts->x_target_flags)
4495 /* See if arch, tune, etc. are the same. */
4496 else if (caller_opts->arch != callee_opts->arch)
4499 else if (caller_opts->tune != callee_opts->tune)
4502 else if (caller_opts->x_ix86_fpmath != callee_opts->x_ix86_fpmath)
4505 else if (caller_opts->branch_cost != callee_opts->branch_cost)
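/* The ISA check above is a subset test done with one mask expression:
   ANDing the caller's bits into the callee's must give back exactly the
   callee's bits.  A standalone sketch with illustrative masks:  */

#include <stdio.h>
#include <stdbool.h>

static bool
isa_subset_p (unsigned caller_isa, unsigned callee_isa)
{
  /* Every bit set in callee_isa must also be set in caller_isa.  */
  return (caller_isa & callee_isa) == callee_isa;
}

int
main (void)
{
  unsigned sse2 = 1u << 0;
  unsigned sse4 = (1u << 0) | (1u << 1);  /* SSE4 implies SSE2 here */

  printf ("%d\n", isa_subset_p (sse4, sse2));  /* 1: may inline */
  printf ("%d\n", isa_subset_p (sse2, sse4));  /* 0: may not */
  return 0;
}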
4516 /* Remember the last target of ix86_set_current_function. */
4517 static GTY(()) tree ix86_previous_fndecl;
4519 /* Establish appropriate back-end context for processing the function
4520 FNDECL. The argument might be NULL to indicate processing at top
4521 level, outside of any function scope. */
4523 ix86_set_current_function (tree fndecl)
4525 /* Only change the context if the function changes. This hook is called
4526 several times in the course of compiling a function, and we don't want to
4527 slow things down too much or call target_reinit when it isn't safe. */
4528 if (fndecl && fndecl != ix86_previous_fndecl)
4530 tree old_tree = (ix86_previous_fndecl
4531 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4534 tree new_tree = (fndecl
4535 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4538 ix86_previous_fndecl = fndecl;
4539 if (old_tree == new_tree)
4544 cl_target_option_restore (&global_options,
4545 TREE_TARGET_OPTION (new_tree));
4551 struct cl_target_option *def
4552 = TREE_TARGET_OPTION (target_option_current_node);
4554 cl_target_option_restore (&global_options, def);
4561 /* Return true if this goes in large data/bss. */
4564 ix86_in_large_data_p (tree exp)
4566 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4569 /* Functions are never large data. */
4570 if (TREE_CODE (exp) == FUNCTION_DECL)
4573 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4575 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4576 if (strcmp (section, ".ldata") == 0
4577 || strcmp (section, ".lbss") == 0)
4583 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4585 /* If this is an incomplete type with size 0, then we can't put it
4586 in data because it might be too big when completed. */
4587 if (!size || size > ix86_section_threshold)
4594 /* Switch to the appropriate section for output of DECL.
4595 DECL is either a `VAR_DECL' node or a constant of some sort.
4596 RELOC indicates whether forming the initial value of DECL requires
4597 link-time relocations. */
4599 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4603 x86_64_elf_select_section (tree decl, int reloc,
4604 unsigned HOST_WIDE_INT align)
4606 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4607 && ix86_in_large_data_p (decl))
4609 const char *sname = NULL;
4610 unsigned int flags = SECTION_WRITE;
4611 switch (categorize_decl_for_section (decl, reloc))
4616 case SECCAT_DATA_REL:
4617 sname = ".ldata.rel";
4619 case SECCAT_DATA_REL_LOCAL:
4620 sname = ".ldata.rel.local";
4622 case SECCAT_DATA_REL_RO:
4623 sname = ".ldata.rel.ro";
4625 case SECCAT_DATA_REL_RO_LOCAL:
4626 sname = ".ldata.rel.ro.local";
4630 flags |= SECTION_BSS;
4633 case SECCAT_RODATA_MERGE_STR:
4634 case SECCAT_RODATA_MERGE_STR_INIT:
4635 case SECCAT_RODATA_MERGE_CONST:
4639 case SECCAT_SRODATA:
4646 /* We don't split these for the medium model. Place them into
4647 default sections and hope for the best. */
4652 /* We might get called with string constants, but get_named_section
4653 doesn't like them as they are not DECLs. Also, we need to set
4654 flags in that case. */
4656 return get_section (sname, flags, NULL);
4657 return get_named_section (decl, sname, reloc);
4660 return default_elf_select_section (decl, reloc, align);
4663 /* Build up a unique section name, expressed as a
4664 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4665 RELOC indicates whether the initial value of EXP requires
4666 link-time relocations. */
4668 static void ATTRIBUTE_UNUSED
4669 x86_64_elf_unique_section (tree decl, int reloc)
4671 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4672 && ix86_in_large_data_p (decl))
4674 const char *prefix = NULL;
4675 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4676 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4678 switch (categorize_decl_for_section (decl, reloc))
4681 case SECCAT_DATA_REL:
4682 case SECCAT_DATA_REL_LOCAL:
4683 case SECCAT_DATA_REL_RO:
4684 case SECCAT_DATA_REL_RO_LOCAL:
4685 prefix = one_only ? ".ld" : ".ldata";
4688 prefix = one_only ? ".lb" : ".lbss";
4691 case SECCAT_RODATA_MERGE_STR:
4692 case SECCAT_RODATA_MERGE_STR_INIT:
4693 case SECCAT_RODATA_MERGE_CONST:
4694 prefix = one_only ? ".lr" : ".lrodata";
4696 case SECCAT_SRODATA:
4703 /* We don't split these for the medium model. Place them into
4704 default sections and hope for the best. */
4709 const char *name, *linkonce;
4712 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4713 name = targetm.strip_name_encoding (name);
4715 /* If we're using one_only, then there needs to be a .gnu.linkonce
4716 prefix to the section name. */
4717 linkonce = one_only ? ".gnu.linkonce" : "";
4719 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4721 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4725 default_unique_section (decl, reloc);
4728 #ifdef COMMON_ASM_OP
4729 /* This says how to output assembler code to declare an
4730 uninitialized external linkage data object.
4732 For medium-model x86-64 we need to use the .largecomm pseudo-op for large objects. */
4735 x86_elf_aligned_common (FILE *file,
4736 const char *name, unsigned HOST_WIDE_INT size,
4739 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4740 && size > (unsigned int)ix86_section_threshold)
4741 fputs (".largecomm\t", file);
4743 fputs (COMMON_ASM_OP, file);
4744 assemble_name (file, name);
4745 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4746 size, align / BITS_PER_UNIT);
4750 /* Utility function for targets to use in implementing
4751 ASM_OUTPUT_ALIGNED_BSS. */
4754 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4755 const char *name, unsigned HOST_WIDE_INT size,
4758 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4759 && size > (unsigned int)ix86_section_threshold)
4760 switch_to_section (get_named_section (decl, ".lbss", 0));
4762 switch_to_section (bss_section);
4763 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4764 #ifdef ASM_DECLARE_OBJECT_NAME
4765 last_assemble_variable_decl = decl;
4766 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4768 /* The standard thing is to just output a label for the object. */
4769 ASM_OUTPUT_LABEL (file, name);
4770 #endif /* ASM_DECLARE_OBJECT_NAME */
4771 ASM_OUTPUT_SKIP (file, size ? size : 1);
4774 /* Decide whether we must probe the stack before any space allocation
4775 on this target. It's essentially TARGET_STACK_PROBE except when
4776 -fstack-check causes the stack to be already probed differently. */
4779 ix86_target_stack_probe (void)
4781 /* Do not probe the stack twice if static stack checking is enabled. */
4782 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
4785 return TARGET_STACK_PROBE;
4788 /* Decide whether we can make a sibling call to a function. DECL is the
4789 declaration of the function being targeted by the call and EXP is the
4790 CALL_EXPR representing the call. */
4793 ix86_function_ok_for_sibcall (tree decl, tree exp)
4795 tree type, decl_or_type;
4798 /* If we are generating position-independent code, we cannot sibcall
4799 optimize any indirect call, or a direct call to a global function,
4800 as the PLT requires %ebx be live. (Darwin does not have a PLT.) */
4804 && (!decl || !targetm.binds_local_p (decl)))
4807 /* If we need to align the outgoing stack, then sibcalling would
4808 unalign the stack, which may break the called function. */
4809 if (ix86_minimum_incoming_stack_boundary (true)
4810 < PREFERRED_STACK_BOUNDARY)
4815 decl_or_type = decl;
4816 type = TREE_TYPE (decl);
4820 /* We're looking at the CALL_EXPR; we need the type of the function. */
4821 type = CALL_EXPR_FN (exp); /* pointer expression */
4822 type = TREE_TYPE (type); /* pointer type */
4823 type = TREE_TYPE (type); /* function type */
4824 decl_or_type = type;
4827 /* Check that the return value locations are the same. Like
4828 if we are returning floats on the 80387 register stack, we cannot
4829 make a sibcall from a function that doesn't return a float to a
4830 function that does or, conversely, from a function that does return
4831 a float to a function that doesn't; the necessary stack adjustment
4832 would not be executed. This is also the place we notice
4833 differences in the return value ABI. Note that it is ok for one
4834 of the functions to have void return type as long as the return
4835 value of the other is passed in a register. */
4836 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4837 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4839 if (STACK_REG_P (a) || STACK_REG_P (b))
4841 if (!rtx_equal_p (a, b))
4844 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4846 /* Disable sibcall if we need to generate vzeroupper after the sibcall. */
4848 if (TARGET_VZEROUPPER
4849 && cfun->machine->callee_return_avx256_p
4850 && !cfun->machine->caller_return_avx256_p)
4853 else if (!rtx_equal_p (a, b))
4858 /* The SYSV ABI has more call-clobbered registers;
4859 disallow sibcalls from MS to SYSV. */
4860 if (cfun->machine->call_abi == MS_ABI
4861 && ix86_function_type_abi (type) == SYSV_ABI)
4866 /* If this call is indirect, we'll need to be able to use a
4867 call-clobbered register for the address of the target function.
4868 Make sure that all such registers are not used for passing
4869 parameters. Note that DLLIMPORT functions are indirect. */
4871 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4873 if (ix86_function_regparm (type, NULL) >= 3)
4875 /* ??? Need to count the actual number of registers to be used,
4876 not the possible number of registers. Fix later. */
4882 /* Otherwise okay. That also includes certain types of indirect calls. */
4886 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4887 and "sseregparm" calling convention attributes;
4888 arguments as in struct attribute_spec.handler. */
4891 ix86_handle_cconv_attribute (tree *node, tree name,
4893 int flags ATTRIBUTE_UNUSED,
4896 if (TREE_CODE (*node) != FUNCTION_TYPE
4897 && TREE_CODE (*node) != METHOD_TYPE
4898 && TREE_CODE (*node) != FIELD_DECL
4899 && TREE_CODE (*node) != TYPE_DECL)
4901 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4903 *no_add_attrs = true;
4907 /* Can combine regparm with all attributes but fastcall and thiscall. */
4908 if (is_attribute_p ("regparm", name))
4912 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4914 error ("fastcall and regparm attributes are not compatible");
4917 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4919 error ("regparam and thiscall attributes are not compatible");
4922 cst = TREE_VALUE (args);
4923 if (TREE_CODE (cst) != INTEGER_CST)
4925 warning (OPT_Wattributes,
4926 "%qE attribute requires an integer constant argument",
4928 *no_add_attrs = true;
4930 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4932 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4934 *no_add_attrs = true;
4942 /* Do not warn when emulating the MS ABI. */
4943 if ((TREE_CODE (*node) != FUNCTION_TYPE
4944 && TREE_CODE (*node) != METHOD_TYPE)
4945 || ix86_function_type_abi (*node) != MS_ABI)
4946 warning (OPT_Wattributes, "%qE attribute ignored",
4948 *no_add_attrs = true;
4952 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4953 if (is_attribute_p ("fastcall", name))
4955 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4957 error ("fastcall and cdecl attributes are not compatible");
4959 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4961 error ("fastcall and stdcall attributes are not compatible");
4963 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4965 error ("fastcall and regparm attributes are not compatible");
4967 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4969 error ("fastcall and thiscall attributes are not compatible");
4973 /* Can combine stdcall with fastcall (redundant), regparm and sseregparm. */
4975 else if (is_attribute_p ("stdcall", name))
4977 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4979 error ("stdcall and cdecl attributes are not compatible");
4981 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4983 error ("stdcall and fastcall attributes are not compatible");
4985 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4987 error ("stdcall and thiscall attributes are not compatible");
4991 /* Can combine cdecl with regparm and sseregparm. */
4992 else if (is_attribute_p ("cdecl", name))
4994 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4996 error ("stdcall and cdecl attributes are not compatible");
4998 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5000 error ("fastcall and cdecl attributes are not compatible");
5002 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
5004 error ("cdecl and thiscall attributes are not compatible");
5007 else if (is_attribute_p ("thiscall", name))
5009 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
5010 warning (OPT_Wattributes, "%qE attribute is used for non-class method",
5012 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
5014 error ("stdcall and thiscall attributes are not compatible");
5016 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
5018 error ("fastcall and thiscall attributes are not compatible");
5020 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
5022 error ("cdecl and thiscall attributes are not compatible");
5026 /* Can combine sseregparm with all attributes. */
5031 /* The transactional memory builtins are implicitly regparm or fastcall
5032 depending on the ABI. Override the generic do-nothing attribute that
5033 these builtins were declared with, and replace it with one of the two
5034 attributes that we expect elsewhere. */
5037 ix86_handle_tm_regparm_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
5038 tree args ATTRIBUTE_UNUSED,
5039 int flags ATTRIBUTE_UNUSED,
5044 /* In no case do we want to add the placeholder attribute. */
5045 *no_add_attrs = true;
5047 /* The 64-bit ABI is unchanged for transactional memory. */
5051 /* ??? Is there a better way to validate 32-bit Windows? We have
5052 cfun->machine->call_abi, but that seems to be set only for 64-bit. */
5053 if (CHECK_STACK_LIMIT > 0)
5054 alt = tree_cons (get_identifier ("fastcall"), NULL, NULL);
5057 alt = tree_cons (NULL, build_int_cst (NULL, 2), NULL);
5058 alt = tree_cons (get_identifier ("regparm"), alt, NULL);
5060 decl_attributes (node, alt, flags);
5068 /* This function determines the calling convention from TYPE. */
5068 ix86_get_callcvt (const_tree type)
5070 unsigned int ret = 0;
5075 return IX86_CALLCVT_CDECL;
5077 attrs = TYPE_ATTRIBUTES (type);
5078 if (attrs != NULL_TREE)
5080 if (lookup_attribute ("cdecl", attrs))
5081 ret |= IX86_CALLCVT_CDECL;
5082 else if (lookup_attribute ("stdcall", attrs))
5083 ret |= IX86_CALLCVT_STDCALL;
5084 else if (lookup_attribute ("fastcall", attrs))
5085 ret |= IX86_CALLCVT_FASTCALL;
5086 else if (lookup_attribute ("thiscall", attrs))
5087 ret |= IX86_CALLCVT_THISCALL;
5089 /* Regparm isn't allowed for thiscall and fastcall. */
5090 if ((ret & (IX86_CALLCVT_THISCALL | IX86_CALLCVT_FASTCALL)) == 0)
5092 if (lookup_attribute ("regparm", attrs))
5093 ret |= IX86_CALLCVT_REGPARM;
5094 if (lookup_attribute ("sseregparm", attrs))
5095 ret |= IX86_CALLCVT_SSEREGPARM;
5098 if (IX86_BASE_CALLCVT(ret) != 0)
5102 is_stdarg = stdarg_p (type);
5103 if (TARGET_RTD && !is_stdarg)
5104 return IX86_CALLCVT_STDCALL | ret;
5108 || TREE_CODE (type) != METHOD_TYPE
5109 || ix86_function_type_abi (type) != MS_ABI)
5110 return IX86_CALLCVT_CDECL | ret;
5112 return IX86_CALLCVT_THISCALL;
5115 /* Return 0 if the attributes for two types are incompatible, 1 if they
5116 are compatible, and 2 if they are nearly compatible (which causes a
5117 warning to be generated). */
5120 ix86_comp_type_attributes (const_tree type1, const_tree type2)
5122 unsigned int ccvt1, ccvt2;
5124 if (TREE_CODE (type1) != FUNCTION_TYPE
5125 && TREE_CODE (type1) != METHOD_TYPE)
5128 ccvt1 = ix86_get_callcvt (type1);
5129 ccvt2 = ix86_get_callcvt (type2);
5132 if (ix86_function_regparm (type1, NULL)
5133 != ix86_function_regparm (type2, NULL))
5139 /* Return the regparm value for a function with the indicated TYPE and DECL.
5140 DECL may be NULL when calling the function indirectly
5141 or considering a libcall. */
5144 ix86_function_regparm (const_tree type, const_tree decl)
5151 return (ix86_function_type_abi (type) == SYSV_ABI
5152 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
5153 ccvt = ix86_get_callcvt (type);
5154 regparm = ix86_regparm;
5156 if ((ccvt & IX86_CALLCVT_REGPARM) != 0)
5158 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
5161 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
5165 else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
5167 else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
5170 /* Use register calling convention for local functions when possible. */
5172 && TREE_CODE (decl) == FUNCTION_DECL
5174 && !(profile_flag && !flag_fentry))
5176 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5177 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
5178 if (i && i->local && i->can_change_signature)
5180 int local_regparm, globals = 0, regno;
5182 /* Make sure no regparm register is taken by a
5183 fixed register variable. */
5184 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
5185 if (fixed_regs[local_regparm])
5188 /* We don't want to use regparm(3) for nested functions as
5189 these use a static chain pointer in the third argument. */
5190 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
5193 /* In 32-bit mode save a register for the split stack. */
5194 if (!TARGET_64BIT && local_regparm == 3 && flag_split_stack)
5197 /* Each fixed register usage increases register pressure,
5198 so fewer registers should be used for argument passing.
5199 This functionality can be overridden by an explicit regparm value. */
5201 for (regno = 0; regno <= DI_REG; regno++)
5202 if (fixed_regs[regno])
5206 = globals < local_regparm ? local_regparm - globals : 0;
5208 if (local_regparm > regparm)
5209 regparm = local_regparm;
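/* A rough standalone sketch of the local-regparm computation above: the
   candidate count stops at the first fixed register, and every fixed
   register among the low ones is subtracted as a "global".  The
   three-register file is illustrative, not the real register set.  */

#include <stdio.h>
#include <stdbool.h>

static int
local_regparm (const bool *fixed, int candidates)
{
  int n, globals = 0, i;

  /* Stop at the first candidate register taken by a fixed variable.  */
  for (n = 0; n < candidates; n++)
    if (fixed[n])
      break;

  /* Each fixed register increases register pressure.  */
  for (i = 0; i < candidates; i++)
    if (fixed[i])
      globals++;

  return globals < n ? n - globals : 0;
}

int
main (void)
{
  bool fixed[3] = { false, false, true };  /* one register is pinned */
  printf ("%d\n", local_regparm (fixed, 3));  /* prints 1 */
  return 0;
}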
5216 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
5217 DFmode (2) arguments in SSE registers for a function with the
5218 indicated TYPE and DECL. DECL may be NULL when calling the function
5219 indirectly or considering a libcall. Otherwise return 0. */
5222 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
5224 gcc_assert (!TARGET_64BIT);
5226 /* Use SSE registers to pass SFmode and DFmode arguments if requested
5227 by the sseregparm attribute. */
5228 if (TARGET_SSEREGPARM
5229 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
5236 error ("calling %qD with attribute sseregparm without "
5237 "SSE/SSE2 enabled", decl);
5239 error ("calling %qT with attribute sseregparm without "
5240 "SSE/SSE2 enabled", type);
5248 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
5249 (and DFmode for SSE2) arguments in SSE registers. */
5250 if (decl && TARGET_SSE_MATH && optimize
5251 && !(profile_flag && !flag_fentry))
5253 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5254 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
5255 if (i && i->local && i->can_change_signature)
5256 return TARGET_SSE2 ? 2 : 1;
5262 /* Return true if EAX is live at the start of the function. Used by
5263 ix86_expand_prologue to determine if we need special help before
5264 calling allocate_stack_worker. */
5267 ix86_eax_live_at_start_p (void)
5269 /* Cheat. Don't bother working forward from ix86_function_regparm
5270 to the function type to whether an actual argument is located in
5271 eax. Instead just look at cfg info, which is still close enough
5272 to correct at this point. This gives false positives for broken
5273 functions that might use uninitialized data that happens to be
5274 allocated in eax, but who cares? */
5275 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
5279 ix86_keep_aggregate_return_pointer (tree fntype)
5285 attr = lookup_attribute ("callee_pop_aggregate_return",
5286 TYPE_ATTRIBUTES (fntype));
5288 return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0);
5290 /* For 32-bit MS-ABI the default is to keep the aggregate return pointer. */
5292 if (ix86_function_type_abi (fntype) == MS_ABI)
5295 return KEEP_AGGREGATE_RETURN_POINTER != 0;
5298 /* Value is the number of bytes of arguments automatically
5299 popped when returning from a subroutine call.
5300 FUNDECL is the declaration node of the function (as a tree),
5301 FUNTYPE is the data type of the function (as a tree),
5302 or for a library call it is an identifier node for the subroutine name.
5303 SIZE is the number of bytes of arguments passed on the stack.
5305 On the 80386, the RTD insn may be used to pop them if the number
5306 of args is fixed, but if the number is variable then the caller
5307 must pop them all. RTD can't be used for library calls now
5308 because the library is compiled with the Unix compiler.
5309 Use of RTD is a selectable option, since it is incompatible with
5310 standard Unix calling sequences. If the option is not selected,
5311 the caller must always pop the args.
5313 The attribute stdcall is equivalent to RTD on a per module basis. */
5316 ix86_return_pops_args (tree fundecl, tree funtype, int size)
5320 /* None of the 64-bit ABIs pop arguments. */
5324 ccvt = ix86_get_callcvt (funtype);
5326 if ((ccvt & (IX86_CALLCVT_STDCALL | IX86_CALLCVT_FASTCALL
5327 | IX86_CALLCVT_THISCALL)) != 0
5328 && ! stdarg_p (funtype))
5331 /* Lose any fake structure return argument if it is passed on the stack. */
5332 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
5333 && !ix86_keep_aggregate_return_pointer (funtype))
5335 int nregs = ix86_function_regparm (funtype, fundecl);
5337 return GET_MODE_SIZE (Pmode);
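/* The callee-pop rule above reduces to: a stdcall-like convention with a
   fixed argument list pops SIZE bytes in the callee (RET imm16);
   everything else leaves the stack adjustment to the caller.  A
   standalone sketch of that rule:  */

#include <stdio.h>
#include <stdbool.h>

static int
return_pops_args (bool stdcall_like, bool is_stdarg, int size)
{
  if (stdcall_like && !is_stdarg)
    return size;  /* callee pops its fixed arguments */
  return 0;       /* caller adjusts the stack */
}

int
main (void)
{
  printf ("%d\n", return_pops_args (true, false, 12));  /* 12 */
  printf ("%d\n", return_pops_args (true, true, 12));   /* 0: varargs */
  return 0;
}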
5343 /* Argument support functions. */
5345 /* Return true when a register may be used to pass function parameters. */
5347 ix86_function_arg_regno_p (int regno)
5350 const int *parm_regs;
5355 return (regno < REGPARM_MAX
5356 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
5358 return (regno < REGPARM_MAX
5359 || (TARGET_MMX && MMX_REGNO_P (regno)
5360 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
5361 || (TARGET_SSE && SSE_REGNO_P (regno)
5362 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
5367 if (SSE_REGNO_P (regno) && TARGET_SSE)
5372 if (TARGET_SSE && SSE_REGNO_P (regno)
5373 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
5377 /* TODO: The function should depend on the current function's ABI, but
5378 builtins.c would need updating then. Therefore we use the default ABI. */
5381 /* RAX is used as a hidden argument to va_arg functions. */
5382 if (ix86_abi == SYSV_ABI && regno == AX_REG)
5385 if (ix86_abi == MS_ABI)
5386 parm_regs = x86_64_ms_abi_int_parameter_registers;
5388 parm_regs = x86_64_int_parameter_registers;
5389 for (i = 0; i < (ix86_abi == MS_ABI
5390 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
5391 if (regno == parm_regs[i])
5396 /* Return true if we do not know how to pass TYPE solely in registers. */
5399 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
5401 if (must_pass_in_stack_var_size_or_pad (mode, type))
5404 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
5405 The layout_type routine is crafty and tries to trick us into passing
5406 currently unsupported vector types on the stack by using TImode. */
5407 return (!TARGET_64BIT && mode == TImode
5408 && type && TREE_CODE (type) != VECTOR_TYPE);
5411 /* Return the size, in bytes, of the area reserved for arguments passed
5412 in registers for the function represented by FNDECL, depending on the ABI used. */
5415 ix86_reg_parm_stack_space (const_tree fndecl)
5417 enum calling_abi call_abi = SYSV_ABI;
5418 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
5419 call_abi = ix86_function_abi (fndecl);
5421 call_abi = ix86_function_type_abi (fndecl);
5422 if (TARGET_64BIT && call_abi == MS_ABI)
5427 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the calling ABI used. */
5430 ix86_function_type_abi (const_tree fntype)
5432 if (fntype != NULL_TREE && TYPE_ATTRIBUTES (fntype) != NULL_TREE)
5434 enum calling_abi abi = ix86_abi;
5435 if (abi == SYSV_ABI)
5437 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
5440 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
5448 ix86_function_ms_hook_prologue (const_tree fn)
5450 if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
5452 if (decl_function_context (fn) != NULL_TREE)
5453 error_at (DECL_SOURCE_LOCATION (fn),
5454 "ms_hook_prologue is not compatible with nested function");
5461 static enum calling_abi
5462 ix86_function_abi (const_tree fndecl)
5466 return ix86_function_type_abi (TREE_TYPE (fndecl));
5469 /* Return SYSV_ABI or MS_ABI, depending on cfun, specifying the calling ABI used. */
5472 ix86_cfun_abi (void)
5476 return cfun->machine->call_abi;
5479 /* Write the extra assembler code needed to declare a function properly. */
5482 ix86_asm_output_function_label (FILE *asm_out_file, const char *fname,
5485 bool is_ms_hook = ix86_function_ms_hook_prologue (decl);
5489 int i, filler_count = (TARGET_64BIT ? 32 : 16);
5490 unsigned int filler_cc = 0xcccccccc;
5492 for (i = 0; i < filler_count; i += 4)
5493 fprintf (asm_out_file, ASM_LONG " %#x\n", filler_cc);
5496 #ifdef SUBTARGET_ASM_UNWIND_INIT
5497 SUBTARGET_ASM_UNWIND_INIT (asm_out_file);
5500 ASM_OUTPUT_LABEL (asm_out_file, fname);
5502 /* Output a magic byte marker, if the hot-patch attribute is set. */
5507 /* leaq [%rsp + 0], %rsp */
5508 asm_fprintf (asm_out_file, ASM_BYTE
5509 "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n");
5513 /* movl.s %edi, %edi
5515 movl.s %esp, %ebp */
5516 asm_fprintf (asm_out_file, ASM_BYTE
5517 "0x8b, 0xff, 0x55, 0x8b, 0xec\n");
5523 extern void init_regs (void);
5525 /* Implementation of the call-ABI-switching target hook. The call
5526 register sets specific to FNDECL are set up here. See also
5527 ix86_conditional_register_usage for more details. */
5529 ix86_call_abi_override (const_tree fndecl)
5531 if (fndecl == NULL_TREE)
5532 cfun->machine->call_abi = ix86_abi;
5534 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
5537 /* The 64-bit MS and SYSV ABIs have different sets of call-used registers. Avoid
5538 the expensive re-initialization of init_regs each time we switch function context,
5539 since it is needed only during RTL expansion. */
5541 ix86_maybe_switch_abi (void)
5544 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5548 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5549 for a call to a function whose data type is FNTYPE.
5550 For a library call, FNTYPE is 0. */
5553 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5554 tree fntype, /* tree ptr for function decl */
5555 rtx libname, /* SYMBOL_REF of library name or 0 */
5559 struct cgraph_local_info *i;
5562 memset (cum, 0, sizeof (*cum));
5564 /* Initialize for the current callee. */
5567 cfun->machine->callee_pass_avx256_p = false;
5568 cfun->machine->callee_return_avx256_p = false;
5573 i = cgraph_local_info (fndecl);
5574 cum->call_abi = ix86_function_abi (fndecl);
5575 fnret_type = TREE_TYPE (TREE_TYPE (fndecl));
5580 cum->call_abi = ix86_function_type_abi (fntype);
5582 fnret_type = TREE_TYPE (fntype);
5587 if (TARGET_VZEROUPPER && fnret_type)
5589 rtx fnret_value = ix86_function_value (fnret_type, fntype,
5591 if (function_pass_avx256_p (fnret_value))
5593 /* The return value of this function uses 256bit AVX modes. */
5595 cfun->machine->callee_return_avx256_p = true;
5597 cfun->machine->caller_return_avx256_p = true;
5601 cum->caller = caller;
5603 /* Set up the number of registers to use for passing arguments. */
5605 if (TARGET_64BIT && cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5606 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5607 "or subtarget optimization implying it");
5608 cum->nregs = ix86_regparm;
5611 cum->nregs = (cum->call_abi == SYSV_ABI
5612 ? X86_64_REGPARM_MAX
5613 : X86_64_MS_REGPARM_MAX);
5617 cum->sse_nregs = SSE_REGPARM_MAX;
5620 cum->sse_nregs = (cum->call_abi == SYSV_ABI
5621 ? X86_64_SSE_REGPARM_MAX
5622 : X86_64_MS_SSE_REGPARM_MAX);
5626 cum->mmx_nregs = MMX_REGPARM_MAX;
5627 cum->warn_avx = true;
5628 cum->warn_sse = true;
5629 cum->warn_mmx = true;
5631 /* Because the type might mismatch between caller and callee, we need to
5632 use the actual type of the function for local calls.
5633 FIXME: cgraph_analyze can be told to actually record if a function uses
5634 va_start, so for local functions maybe_vaarg can be made more aggressive
5636 FIXME: once the type system is fixed, we won't need this code anymore. */
5637 if (i && i->local && i->can_change_signature)
5638 fntype = TREE_TYPE (fndecl);
5639 cum->maybe_vaarg = (fntype
5640 ? (!prototype_p (fntype) || stdarg_p (fntype))
5645 /* If there are variable arguments, then we won't pass anything
5646 in registers in 32-bit mode. */
5647 if (stdarg_p (fntype))
5658 /* Use the ecx and edx registers if the function has the fastcall
5659 attribute, otherwise look for regparm information. */
5662 unsigned int ccvt = ix86_get_callcvt (fntype);
5663 if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
5666 cum->fastcall = 1; /* Same first register as in fastcall. */
5668 else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
5674 cum->nregs = ix86_function_regparm (fntype, fndecl);
5677 /* Set up the number of SSE registers used for passing SFmode
5678 and DFmode arguments. Warn for mismatching ABI. */
5679 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
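/* Illustrative example, not part of the original source, of the
   32-bit register conventions set up above:

     __attribute__((fastcall)) int f (int a, int b, int c);
     // a -> %ecx, b -> %edx, c -> stack

   thiscall likewise starts at ECX but allocates only that one
   register, matching the cum->fastcall handling above.  */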
5683 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5684 But in the case of vector types, it is some vector mode.
5686 When we have only some of our vector isa extensions enabled, then there
5687 are some modes for which vector_mode_supported_p is false. For these
5688 modes, the generic vector support in gcc will choose some non-vector mode
5689 in order to implement the type. By computing the natural mode, we'll
5690 select the proper ABI location for the operand and not depend on whatever
5691 the middle-end decides to do with these vector types.
5693 The middle-end can't deal with vector types larger than 16 bytes. In this
5694 case, we return the original mode and warn about the ABI change if CUM isn't
5697 static enum machine_mode
5698 type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
5700 enum machine_mode mode = TYPE_MODE (type);
5702 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5704 HOST_WIDE_INT size = int_size_in_bytes (type);
5705 if ((size == 8 || size == 16 || size == 32)
5706 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5707 && TYPE_VECTOR_SUBPARTS (type) > 1)
5709 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5711 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5712 mode = MIN_MODE_VECTOR_FLOAT;
5714 mode = MIN_MODE_VECTOR_INT;
5716 /* Get the mode which has this inner mode and number of units. */
5717 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5718 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5719 && GET_MODE_INNER (mode) == innermode)
5721 if (size == 32 && !TARGET_AVX)
5723 static bool warnedavx;
5730 warning (0, "AVX vector argument without AVX "
5731 "enabled changes the ABI");
5733 return TYPE_MODE (type);
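/* Illustrative example, not part of the original source: for a GNU
   vector type the loop above recovers the real vector mode even when
   generic code lowered the type:

     typedef int v4si __attribute__ ((vector_size (16)));
     // type_natural_mode returns V4SImode for v4si, so the argument
     // is classified for an SSE slot; a 32-byte vector without -mavx
     // instead triggers the psABI warning and falls back to TYPE_MODE.  */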
5746 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5747 this may not agree with the mode that the type system has chosen for the
5748 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5749 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5752 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5757 if (orig_mode != BLKmode)
5758 tmp = gen_rtx_REG (orig_mode, regno);
5761 tmp = gen_rtx_REG (mode, regno);
5762 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5763 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5769 /* x86-64 register passing implementation. See the x86-64 ABI for details.
5770 The goal of this code is to classify each eightbyte of an incoming argument
5771 by register class and assign registers accordingly. */
5773 /* Return the union class of CLASS1 and CLASS2.
5774 See the x86-64 PS ABI for details. */
5776 static enum x86_64_reg_class
5777 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5779 /* Rule #1: If both classes are equal, this is the resulting class. */
5780 if (class1 == class2)
5783 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5785 if (class1 == X86_64_NO_CLASS)
5787 if (class2 == X86_64_NO_CLASS)
5790 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5791 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5792 return X86_64_MEMORY_CLASS;
5794 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5795 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5796 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5797 return X86_64_INTEGERSI_CLASS;
5798 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5799 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5800 return X86_64_INTEGER_CLASS;
5802 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5804 if (class1 == X86_64_X87_CLASS
5805 || class1 == X86_64_X87UP_CLASS
5806 || class1 == X86_64_COMPLEX_X87_CLASS
5807 || class2 == X86_64_X87_CLASS
5808 || class2 == X86_64_X87UP_CLASS
5809 || class2 == X86_64_COMPLEX_X87_CLASS)
5810 return X86_64_MEMORY_CLASS;
5812 /* Rule #6: Otherwise class SSE is used. */
5813 return X86_64_SSE_CLASS;
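/* Worked example, not part of the original source, of the merge rules
   above applied per eightbyte:

     struct s { int a; float b; double c; };
     // eightbyte 0 covers a (INTEGERSI) and b (SSESF); rule #4's
     // special case merges them to INTEGERSI.
     // eightbyte 1 covers c alone: SSEDF.
     // Result: s is passed in one integer register and one SSE register.  */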
5816 /* Classify the argument of type TYPE and mode MODE.
5817 CLASSES will be filled by the register class used to pass each word
5818 of the operand. The number of words is returned. In case the parameter
5819 should be passed in memory, 0 is returned. As a special case for zero
5820 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5822 BIT_OFFSET is used internally for handling records; it specifies the
5823 offset in bits modulo 256 to avoid overflow cases.
5825 See the x86-64 PS ABI for details.
5829 classify_argument (enum machine_mode mode, const_tree type,
5830 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5832 HOST_WIDE_INT bytes =
5833 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5834 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5836 /* Variable sized entities are always passed/returned in memory. */
5840 if (mode != VOIDmode
5841 && targetm.calls.must_pass_in_stack (mode, type))
5844 if (type && AGGREGATE_TYPE_P (type))
5848 enum x86_64_reg_class subclasses[MAX_CLASSES];
5850 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5854 for (i = 0; i < words; i++)
5855 classes[i] = X86_64_NO_CLASS;
5857 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
5858 signal the memory class, so handle this as a special case. */
5861 classes[0] = X86_64_NO_CLASS;
5865 /* Classify each field of record and merge classes. */
5866 switch (TREE_CODE (type))
5869 /* And now merge the fields of the structure. */
5870 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5872 if (TREE_CODE (field) == FIELD_DECL)
5876 if (TREE_TYPE (field) == error_mark_node)
5879 /* Bitfields are always classified as integer. Handle them
5880 early, since later code would consider them to be
5881 misaligned integers. */
5882 if (DECL_BIT_FIELD (field))
5884 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5885 i < ((int_bit_position (field) + (bit_offset % 64))
5886 + tree_low_cst (DECL_SIZE (field), 0)
5889 merge_classes (X86_64_INTEGER_CLASS,
5896 type = TREE_TYPE (field);
5898 /* Flexible array member is ignored. */
5899 if (TYPE_MODE (type) == BLKmode
5900 && TREE_CODE (type) == ARRAY_TYPE
5901 && TYPE_SIZE (type) == NULL_TREE
5902 && TYPE_DOMAIN (type) != NULL_TREE
5903 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5908 if (!warned && warn_psabi)
5911 inform (input_location,
5912 "the ABI of passing struct with"
5913 " a flexible array member has"
5914 " changed in GCC 4.4");
5918 num = classify_argument (TYPE_MODE (type), type,
5920 (int_bit_position (field)
5921 + bit_offset) % 256);
5924 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5925 for (i = 0; i < num && (i + pos) < words; i++)
5927 merge_classes (subclasses[i], classes[i + pos]);
5934 /* Arrays are handled as small records. */
5937 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5938 TREE_TYPE (type), subclasses, bit_offset);
5942 /* The partial classes are now full classes. */
5943 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5944 subclasses[0] = X86_64_SSE_CLASS;
5945 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5946 && !((bit_offset % 64) == 0 && bytes == 4))
5947 subclasses[0] = X86_64_INTEGER_CLASS;
5949 for (i = 0; i < words; i++)
5950 classes[i] = subclasses[i % num];
5955 case QUAL_UNION_TYPE:
5956 /* Unions are similar to RECORD_TYPE but the offset is always 0.
5958 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5960 if (TREE_CODE (field) == FIELD_DECL)
5964 if (TREE_TYPE (field) == error_mark_node)
5967 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5968 TREE_TYPE (field), subclasses,
5972 for (i = 0; i < num; i++)
5973 classes[i] = merge_classes (subclasses[i], classes[i]);
5984 /* When the size exceeds 16 bytes, if the first class isn't
5985 X86_64_SSE_CLASS or any of the other classes isn't
5986 X86_64_SSEUP_CLASS, everything should be passed in
5988 if (classes[0] != X86_64_SSE_CLASS)
5991 for (i = 1; i < words; i++)
5992 if (classes[i] != X86_64_SSEUP_CLASS)
5996 /* Final merger cleanup. */
5997 for (i = 0; i < words; i++)
5999 /* If one class is MEMORY, everything should be passed in
6001 if (classes[i] == X86_64_MEMORY_CLASS)
6004 /* X86_64_SSEUP_CLASS should always be preceded by
6005 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
6006 if (classes[i] == X86_64_SSEUP_CLASS
6007 && classes[i - 1] != X86_64_SSE_CLASS
6008 && classes[i - 1] != X86_64_SSEUP_CLASS)
6010 /* The first one should never be X86_64_SSEUP_CLASS. */
6011 gcc_assert (i != 0);
6012 classes[i] = X86_64_SSE_CLASS;
6015 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
6016 everything should be passed in memory. */
6017 if (classes[i] == X86_64_X87UP_CLASS
6018 && (classes[i - 1] != X86_64_X87_CLASS))
6022 /* The first one should never be X86_64_X87UP_CLASS. */
6023 gcc_assert (i != 0);
6024 if (!warned && warn_psabi)
6027 inform (input_location,
6028 "the ABI of passing union with long double"
6029 " has changed in GCC 4.4");
6037 /* Compute the alignment needed. We align all types to their natural
6038 boundaries, with the exception of XFmode, which is aligned to 64 bits. */
6039 if (mode != VOIDmode && mode != BLKmode)
6041 int mode_alignment = GET_MODE_BITSIZE (mode);
6044 mode_alignment = 128;
6045 else if (mode == XCmode)
6046 mode_alignment = 256;
6047 if (COMPLEX_MODE_P (mode))
6048 mode_alignment /= 2;
6049 /* Misaligned fields are always returned in memory. */
6050 if (bit_offset % mode_alignment)
6054 /* For V1xx modes, just use the base mode. */
6055 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
6056 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
6057 mode = GET_MODE_INNER (mode);
6059 /* Classification of atomic types. */
6064 classes[0] = X86_64_SSE_CLASS;
6067 classes[0] = X86_64_SSE_CLASS;
6068 classes[1] = X86_64_SSEUP_CLASS;
6078 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
6082 classes[0] = X86_64_INTEGERSI_CLASS;
6085 else if (size <= 64)
6087 classes[0] = X86_64_INTEGER_CLASS;
6090 else if (size <= 64+32)
6092 classes[0] = X86_64_INTEGER_CLASS;
6093 classes[1] = X86_64_INTEGERSI_CLASS;
6096 else if (size <= 64+64)
6098 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6106 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
6110 /* OImode shouldn't be used directly. */
6115 if (!(bit_offset % 64))
6116 classes[0] = X86_64_SSESF_CLASS;
6118 classes[0] = X86_64_SSE_CLASS;
6121 classes[0] = X86_64_SSEDF_CLASS;
6124 classes[0] = X86_64_X87_CLASS;
6125 classes[1] = X86_64_X87UP_CLASS;
6128 classes[0] = X86_64_SSE_CLASS;
6129 classes[1] = X86_64_SSEUP_CLASS;
6132 classes[0] = X86_64_SSE_CLASS;
6133 if (!(bit_offset % 64))
6139 if (!warned && warn_psabi)
6142 inform (input_location,
6143 "the ABI of passing structure with complex float"
6144 " member has changed in GCC 4.4");
6146 classes[1] = X86_64_SSESF_CLASS;
6150 classes[0] = X86_64_SSEDF_CLASS;
6151 classes[1] = X86_64_SSEDF_CLASS;
6154 classes[0] = X86_64_COMPLEX_X87_CLASS;
6157 /* This mode is larger than 16 bytes. */
6165 classes[0] = X86_64_SSE_CLASS;
6166 classes[1] = X86_64_SSEUP_CLASS;
6167 classes[2] = X86_64_SSEUP_CLASS;
6168 classes[3] = X86_64_SSEUP_CLASS;
6176 classes[0] = X86_64_SSE_CLASS;
6177 classes[1] = X86_64_SSEUP_CLASS;
6185 classes[0] = X86_64_SSE_CLASS;
6191 gcc_assert (VECTOR_MODE_P (mode));
6196 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
6198 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
6199 classes[0] = X86_64_INTEGERSI_CLASS;
6201 classes[0] = X86_64_INTEGER_CLASS;
6202 classes[1] = X86_64_INTEGER_CLASS;
6203 return 1 + (bytes > 8);
6207 /* Examine the argument and set the number of registers required in each
6208 class. Return 0 iff the parameter should be passed in memory. */
6210 examine_argument (enum machine_mode mode, const_tree type, int in_return,
6211 int *int_nregs, int *sse_nregs)
6213 enum x86_64_reg_class regclass[MAX_CLASSES];
6214 int n = classify_argument (mode, type, regclass, 0);
6220 for (n--; n >= 0; n--)
6221 switch (regclass[n])
6223 case X86_64_INTEGER_CLASS:
6224 case X86_64_INTEGERSI_CLASS:
6227 case X86_64_SSE_CLASS:
6228 case X86_64_SSESF_CLASS:
6229 case X86_64_SSEDF_CLASS:
6232 case X86_64_NO_CLASS:
6233 case X86_64_SSEUP_CLASS:
6235 case X86_64_X87_CLASS:
6236 case X86_64_X87UP_CLASS:
6240 case X86_64_COMPLEX_X87_CLASS:
6241 return in_return ? 2 : 0;
6242 case X86_64_MEMORY_CLASS:
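/* Illustrative example, not part of the original source:

     struct s { long l; double d; };
     // classify_argument yields { INTEGER, SSEDF }, so this function
     // sets *int_nregs = 1 and *sse_nregs = 1 and returns nonzero;
     // a MEMORY classification (e.g. a 24-byte plain struct) makes it
     // return 0, meaning "pass on the stack".  */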
6248 /* Construct a container for the argument used by the GCC interface. See
6249 FUNCTION_ARG for the detailed description. */
6252 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
6253 const_tree type, int in_return, int nintregs, int nsseregs,
6254 const int *intreg, int sse_regno)
6256 /* The following variables hold the static issued_error state. */
6257 static bool issued_sse_arg_error;
6258 static bool issued_sse_ret_error;
6259 static bool issued_x87_ret_error;
6261 enum machine_mode tmpmode;
6263 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
6264 enum x86_64_reg_class regclass[MAX_CLASSES];
6268 int needed_sseregs, needed_intregs;
6269 rtx exp[MAX_CLASSES];
6272 n = classify_argument (mode, type, regclass, 0);
6275 if (!examine_argument (mode, type, in_return, &needed_intregs,
6278 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
6281 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
6282 some less clueful developer tries to use floating-point anyway. */
6283 if (needed_sseregs && !TARGET_SSE)
6287 if (!issued_sse_ret_error)
6289 error ("SSE register return with SSE disabled");
6290 issued_sse_ret_error = true;
6293 else if (!issued_sse_arg_error)
6295 error ("SSE register argument with SSE disabled");
6296 issued_sse_arg_error = true;
6301 /* Likewise, error if the ABI requires us to return values in the
6302 x87 registers and the user specified -mno-80387. */
6303 if (!TARGET_80387 && in_return)
6304 for (i = 0; i < n; i++)
6305 if (regclass[i] == X86_64_X87_CLASS
6306 || regclass[i] == X86_64_X87UP_CLASS
6307 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
6309 if (!issued_x87_ret_error)
6311 error ("x87 register return with x87 disabled");
6312 issued_x87_ret_error = true;
6317 /* First construct the simple cases. Avoid SCmode, since we want to use
6318 a single register to pass this type. */
6319 if (n == 1 && mode != SCmode)
6320 switch (regclass[0])
6322 case X86_64_INTEGER_CLASS:
6323 case X86_64_INTEGERSI_CLASS:
6324 return gen_rtx_REG (mode, intreg[0]);
6325 case X86_64_SSE_CLASS:
6326 case X86_64_SSESF_CLASS:
6327 case X86_64_SSEDF_CLASS:
6328 if (mode != BLKmode)
6329 return gen_reg_or_parallel (mode, orig_mode,
6330 SSE_REGNO (sse_regno));
6332 case X86_64_X87_CLASS:
6333 case X86_64_COMPLEX_X87_CLASS:
6334 return gen_rtx_REG (mode, FIRST_STACK_REG);
6335 case X86_64_NO_CLASS:
6336 /* Zero sized array, struct or class. */
6341 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
6342 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
6343 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6345 && regclass[0] == X86_64_SSE_CLASS
6346 && regclass[1] == X86_64_SSEUP_CLASS
6347 && regclass[2] == X86_64_SSEUP_CLASS
6348 && regclass[3] == X86_64_SSEUP_CLASS
6350 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6353 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
6354 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
6355 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
6356 && regclass[1] == X86_64_INTEGER_CLASS
6357 && (mode == CDImode || mode == TImode || mode == TFmode)
6358 && intreg[0] + 1 == intreg[1])
6359 return gen_rtx_REG (mode, intreg[0]);
6361 /* Otherwise figure out the entries of the PARALLEL. */
6362 for (i = 0; i < n; i++)
6366 switch (regclass[i])
6368 case X86_64_NO_CLASS:
6370 case X86_64_INTEGER_CLASS:
6371 case X86_64_INTEGERSI_CLASS:
6372 /* Merge TImodes on aligned occasions here too. */
6373 if (i * 8 + 8 > bytes)
6374 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
6375 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
6379 /* We've requested 24 bytes we don't have a mode for. Use DImode. */
6380 if (tmpmode == BLKmode)
6382 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6383 gen_rtx_REG (tmpmode, *intreg),
6387 case X86_64_SSESF_CLASS:
6388 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6389 gen_rtx_REG (SFmode,
6390 SSE_REGNO (sse_regno)),
6394 case X86_64_SSEDF_CLASS:
6395 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6396 gen_rtx_REG (DFmode,
6397 SSE_REGNO (sse_regno)),
6401 case X86_64_SSE_CLASS:
6409 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
6419 && regclass[1] == X86_64_SSEUP_CLASS
6420 && regclass[2] == X86_64_SSEUP_CLASS
6421 && regclass[3] == X86_64_SSEUP_CLASS);
6428 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6429 gen_rtx_REG (tmpmode,
6430 SSE_REGNO (sse_regno)),
6439 /* Empty aligned struct, union or class. */
6443 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
6444 for (i = 0; i < nexps; i++)
6445 XVECEXP (ret, 0, i) = exp [i];
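/* Illustrative sketch, not part of the original source: for an
   argument of type

     struct s { double d; long l; };

   with the first SSE and integer slots free, the PARALLEL built above
   is roughly

     (parallel:BLK [(expr_list (reg:DF xmm0) (const_int 0))
                    (expr_list (reg:DI di) (const_int 8))])

   i.e. byte 0 of the value travels in %xmm0 and byte 8 in %rdi.  */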
6449 /* Update the data in CUM to advance over an argument of mode MODE
6450 and data type TYPE. (TYPE is null for libcalls where that information
6451 may not be available.) */
6454 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6455 const_tree type, HOST_WIDE_INT bytes,
6456 HOST_WIDE_INT words)
6472 cum->words += words;
6473 cum->nregs -= words;
6474 cum->regno += words;
6476 if (cum->nregs <= 0)
6484 /* OImode shouldn't be used directly. */
6488 if (cum->float_in_sse < 2)
6491 if (cum->float_in_sse < 1)
6508 if (!type || !AGGREGATE_TYPE_P (type))
6510 cum->sse_words += words;
6511 cum->sse_nregs -= 1;
6512 cum->sse_regno += 1;
6513 if (cum->sse_nregs <= 0)
6527 if (!type || !AGGREGATE_TYPE_P (type))
6529 cum->mmx_words += words;
6530 cum->mmx_nregs -= 1;
6531 cum->mmx_regno += 1;
6532 if (cum->mmx_nregs <= 0)
6543 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6544 const_tree type, HOST_WIDE_INT words, bool named)
6546 int int_nregs, sse_nregs;
6548 /* Unnamed 256bit vector mode parameters are passed on stack. */
6549 if (!named && VALID_AVX256_REG_MODE (mode))
6552 if (examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
6553 && sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6555 cum->nregs -= int_nregs;
6556 cum->sse_nregs -= sse_nregs;
6557 cum->regno += int_nregs;
6558 cum->sse_regno += sse_nregs;
6562 int align = ix86_function_arg_boundary (mode, type) / BITS_PER_WORD;
6563 cum->words = (cum->words + align - 1) & ~(align - 1);
6564 cum->words += words;
6569 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6570 HOST_WIDE_INT words)
6572 /* Otherwise, this should be passed indirectly. */
6573 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6575 cum->words += words;
6583 /* Update the data in CUM to advance over an argument of mode MODE and
6584 data type TYPE. (TYPE is null for libcalls where that information
6585 may not be available.) */
6588 ix86_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
6589 const_tree type, bool named)
6591 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6592 HOST_WIDE_INT bytes, words;
6594 if (mode == BLKmode)
6595 bytes = int_size_in_bytes (type);
6597 bytes = GET_MODE_SIZE (mode);
6598 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6601 mode = type_natural_mode (type, NULL);
6603 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6604 function_arg_advance_ms_64 (cum, bytes, words);
6605 else if (TARGET_64BIT)
6606 function_arg_advance_64 (cum, mode, type, words, named);
6608 function_arg_advance_32 (cum, mode, type, bytes, words);
6611 /* Define where to put the arguments to a function.
6612 Value is zero to push the argument on the stack,
6613 or a hard register in which to store the argument.
6615 MODE is the argument's machine mode.
6616 TYPE is the data type of the argument (as a tree).
6617 This is null for libcalls where that information may
6619 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6620 the preceding args and about the function being called.
6621 NAMED is nonzero if this argument is a named parameter
6622 (otherwise it is an extra parameter matching an ellipsis). */
6625 function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6626 enum machine_mode orig_mode, const_tree type,
6627 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
6629 static bool warnedsse, warnedmmx;
6631 /* Avoid the AL settings for the Unix64 ABI. */
6632 if (mode == VOIDmode)
6648 if (words <= cum->nregs)
6650 int regno = cum->regno;
6652 /* Fastcall allocates the first two DWORD (SImode) or
6653 smaller arguments to ECX and EDX if it isn't an
6659 || (type && AGGREGATE_TYPE_P (type)))
6662 /* ECX, not EAX, is the first allocated register. */
6663 if (regno == AX_REG)
6666 return gen_rtx_REG (mode, regno);
6671 if (cum->float_in_sse < 2)
6674 if (cum->float_in_sse < 1)
6678 /* In 32bit, we pass TImode in xmm registers. */
6685 if (!type || !AGGREGATE_TYPE_P (type))
6687 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6690 warning (0, "SSE vector argument without SSE enabled "
6694 return gen_reg_or_parallel (mode, orig_mode,
6695 cum->sse_regno + FIRST_SSE_REG);
6700 /* OImode shouldn't be used directly. */
6709 if (!type || !AGGREGATE_TYPE_P (type))
6712 return gen_reg_or_parallel (mode, orig_mode,
6713 cum->sse_regno + FIRST_SSE_REG);
6723 if (!type || !AGGREGATE_TYPE_P (type))
6725 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6728 warning (0, "MMX vector argument without MMX enabled "
6732 return gen_reg_or_parallel (mode, orig_mode,
6733 cum->mmx_regno + FIRST_MMX_REG);
6742 function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6743 enum machine_mode orig_mode, const_tree type, bool named)
6745 /* Handle a hidden AL argument containing the number of registers
6746 for varargs x86-64 functions. */
6747 if (mode == VOIDmode)
6748 return GEN_INT (cum->maybe_vaarg
6749 ? (cum->sse_nregs < 0
6750 ? X86_64_SSE_REGPARM_MAX
6765 /* Unnamed 256bit vector mode parameters are passed on stack. */
6771 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6773 &x86_64_int_parameter_registers [cum->regno],
6778 function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6779 enum machine_mode orig_mode, bool named,
6780 HOST_WIDE_INT bytes)
6784 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6785 We use the value -2 to specify that the current function call is MSABI. */
6786 if (mode == VOIDmode)
6787 return GEN_INT (-2);
6789 /* If we've run out of registers, it goes on the stack. */
6790 if (cum->nregs == 0)
6793 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6795 /* Only floating point modes are passed in anything but integer regs. */
6796 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6799 regno = cum->regno + FIRST_SSE_REG;
6804 /* Unnamed floating parameters are passed in both the
6805 SSE and integer registers. */
6806 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6807 t2 = gen_rtx_REG (mode, regno);
6808 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6809 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6810 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6813 /* Handle aggregate types passed in registers. */
6814 if (orig_mode == BLKmode)
6816 if (bytes > 0 && bytes <= 8)
6817 mode = (bytes > 4 ? DImode : SImode);
6818 if (mode == BLKmode)
6822 return gen_reg_or_parallel (mode, orig_mode, regno);
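/* Illustrative example, not part of the original source, of the
   unnamed-float duplication built above for the MS ABI:

     int printf_like (const char *fmt, ...);
     // printf_like ("%f", 1.0) passes 1.0 in both %xmm1 and %rdx
     // (the second argument slot), so the callee can spill it without
     // knowing whether the vararg was integer or floating.  */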
6825 /* Return where to put the arguments to a function.
6826 Return zero to push the argument on the stack, or a hard register in which to store the argument.
6828 MODE is the argument's machine mode. TYPE is the data type of the
6829 argument. It is null for libcalls where that information may not be
6830 available. CUM gives information about the preceding args and about
6831 the function being called. NAMED is nonzero if this argument is a
6832 named parameter (otherwise it is an extra parameter matching an
6836 ix86_function_arg (cumulative_args_t cum_v, enum machine_mode omode,
6837 const_tree type, bool named)
6839 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6840 enum machine_mode mode = omode;
6841 HOST_WIDE_INT bytes, words;
6844 if (mode == BLKmode)
6845 bytes = int_size_in_bytes (type);
6847 bytes = GET_MODE_SIZE (mode);
6848 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6850 /* To simplify the code below, represent vector types with a vector mode
6851 even if MMX/SSE are not active. */
6852 if (type && TREE_CODE (type) == VECTOR_TYPE)
6853 mode = type_natural_mode (type, cum);
6855 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6856 arg = function_arg_ms_64 (cum, mode, omode, named, bytes);
6857 else if (TARGET_64BIT)
6858 arg = function_arg_64 (cum, mode, omode, type, named);
6860 arg = function_arg_32 (cum, mode, omode, type, bytes, words);
6862 if (TARGET_VZEROUPPER && function_pass_avx256_p (arg))
6864 /* This argument uses 256bit AVX modes. */
6866 cfun->machine->callee_pass_avx256_p = true;
6868 cfun->machine->caller_pass_avx256_p = true;
6874 /* A C expression that indicates when an argument must be passed by
6875 reference. If nonzero for an argument, a copy of that argument is
6876 made in memory and a pointer to the argument is passed instead of
6877 the argument itself. The pointer is passed in whatever way is
6878 appropriate for passing a pointer to that type. */
6881 ix86_pass_by_reference (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
6882 enum machine_mode mode ATTRIBUTE_UNUSED,
6883 const_tree type, bool named ATTRIBUTE_UNUSED)
6885 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6887 /* See Windows x64 Software Convention. */
6888 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6890 int msize = (int) GET_MODE_SIZE (mode);
6893 /* Arrays are passed by reference. */
6894 if (TREE_CODE (type) == ARRAY_TYPE)
6897 if (AGGREGATE_TYPE_P (type))
6899 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6900 are passed by reference. */
6901 msize = int_size_in_bytes (type);
6905 /* __m128 is passed by reference. */
6907 case 1: case 2: case 4: case 8:
6913 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
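/* Illustrative example, not part of the original source, of the MS
   ABI size rule above:

     struct by_value { char c[8]; };   // size 8: passed in a register
     struct by_ref   { char c[12]; };  // size 12 not in {1,2,4,8}: by reference
     // Arrays and variable-sized types are likewise passed by reference.  */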
6919 /* Return true when TYPE should be 128bit aligned for 32bit argument
6920 passing ABI. XXX: This function is obsolete and is only used for
6921 checking psABI compatibility with previous versions of GCC. */
6924 ix86_compat_aligned_value_p (const_tree type)
6926 enum machine_mode mode = TYPE_MODE (type);
6927 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6931 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6933 if (TYPE_ALIGN (type) < 128)
6936 if (AGGREGATE_TYPE_P (type))
6938 /* Walk the aggregates recursively. */
6939 switch (TREE_CODE (type))
6943 case QUAL_UNION_TYPE:
6947 /* Walk all the structure fields. */
6948 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6950 if (TREE_CODE (field) == FIELD_DECL
6951 && ix86_compat_aligned_value_p (TREE_TYPE (field)))
6958 /* Just for use if some language passes arrays by value. */
6959 if (ix86_compat_aligned_value_p (TREE_TYPE (type)))
6970 /* Return the alignment boundary for MODE and TYPE with alignment ALIGN.
6971 XXX: This function is obsolete and is only used for checking psABI
6972 compatibility with previous versions of GCC. */
6975 ix86_compat_function_arg_boundary (enum machine_mode mode,
6976 const_tree type, unsigned int align)
6978 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6979 natural boundaries. */
6980 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6982 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6983 make an exception for SSE modes since these require 128bit
6986 The handling here differs from field_alignment. ICC aligns MMX
6987 arguments to 4 byte boundaries, while structure fields are aligned
6988 to 8 byte boundaries. */
6991 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6992 align = PARM_BOUNDARY;
6996 if (!ix86_compat_aligned_value_p (type))
6997 align = PARM_BOUNDARY;
7000 if (align > BIGGEST_ALIGNMENT)
7001 align = BIGGEST_ALIGNMENT;
7005 /* Return true when TYPE should be 128bit aligned for 32bit argument
7009 ix86_contains_aligned_value_p (const_tree type)
7011 enum machine_mode mode = TYPE_MODE (type);
7013 if (mode == XFmode || mode == XCmode)
7016 if (TYPE_ALIGN (type) < 128)
7019 if (AGGREGATE_TYPE_P (type))
7021 /* Walk the aggregates recursively. */
7022 switch (TREE_CODE (type))
7026 case QUAL_UNION_TYPE:
7030 /* Walk all the structure fields. */
7031 for (field = TYPE_FIELDS (type);
7033 field = DECL_CHAIN (field))
7035 if (TREE_CODE (field) == FIELD_DECL
7036 && ix86_contains_aligned_value_p (TREE_TYPE (field)))
7043 /* Just for use if some language passes arrays by value. */
7044 if (ix86_contains_aligned_value_p (TREE_TYPE (type)))
7053 return TYPE_ALIGN (type) >= 128;
7058 /* Gives the alignment boundary, in bits, of an argument with the
7059 specified mode and type. */
7062 ix86_function_arg_boundary (enum machine_mode mode, const_tree type)
7067 /* Since the main variant type is what is used for the call, convert
7068 TYPE to its main variant. */
7069 type = TYPE_MAIN_VARIANT (type);
7070 align = TYPE_ALIGN (type);
7073 align = GET_MODE_ALIGNMENT (mode);
7074 if (align < PARM_BOUNDARY)
7075 align = PARM_BOUNDARY;
7079 unsigned int saved_align = align;
7083 /* i386 ABI defines XFmode arguments to be 4 byte aligned. */
7086 if (mode == XFmode || mode == XCmode)
7087 align = PARM_BOUNDARY;
7089 else if (!ix86_contains_aligned_value_p (type))
7090 align = PARM_BOUNDARY;
7093 align = PARM_BOUNDARY;
7098 && align != ix86_compat_function_arg_boundary (mode, type,
7102 inform (input_location,
7103 "The ABI for passing parameters with %d-byte"
7104 " alignment has changed in GCC 4.6",
7105 align / BITS_PER_UNIT);
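/* Illustrative example, not part of the original source: on 32-bit,
   most arguments keep the 4-byte PARM_BOUNDARY while 16-byte SSE
   values keep their natural 128-bit alignment:

     typedef float v4sf __attribute__ ((vector_size (16)));
     void f (int a, v4sf b);
     // a occupies a 4-byte-aligned slot; b gets a 128-bit-aligned
     // slot, the kind of case the GCC 4.6 psABI note above tracks.  */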
7112 /* Return true if REGNO is a possible register number for a function value. */
7115 ix86_function_value_regno_p (const unsigned int regno)
7122 case FIRST_FLOAT_REG:
7123 /* TODO: The function should depend on the current function's ABI, but
7124 builtins.c would need updating then. Therefore we use the
7126 if (TARGET_64BIT && ix86_abi == MS_ABI)
7128 return TARGET_FLOAT_RETURNS_IN_80387;
7134 if (TARGET_MACHO || TARGET_64BIT)
7142 /* Define how to find the value returned by a function.
7143 VALTYPE is the data type of the value (as a tree).
7144 If the precise function being called is known, FUNC is its FUNCTION_DECL;
7145 otherwise, FUNC is 0. */
7148 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
7149 const_tree fntype, const_tree fn)
7153 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
7154 we normally prevent this case when mmx is not available. However
7155 some ABIs may require the result to be returned like DImode. */
7156 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7157 regno = FIRST_MMX_REG;
7159 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
7160 we prevent this case when sse is not available. However some ABIs
7161 may require the result to be returned like integer TImode. */
7162 else if (mode == TImode
7163 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7164 regno = FIRST_SSE_REG;
7166 /* 32-byte vector modes in %ymm0. */
7167 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
7168 regno = FIRST_SSE_REG;
7170 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
7171 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
7172 regno = FIRST_FLOAT_REG;
7174 /* Most things go in %eax. */
7177 /* Override FP return register with %xmm0 for local functions when
7178 SSE math is enabled or for functions with sseregparm attribute. */
7179 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
7181 int sse_level = ix86_function_sseregparm (fntype, fn, false);
7182 if ((sse_level >= 1 && mode == SFmode)
7183 || (sse_level == 2 && mode == DFmode))
7184 regno = FIRST_SSE_REG;
7187 /* OImode shouldn't be used directly. */
7188 gcc_assert (mode != OImode);
7190 return gen_rtx_REG (orig_mode, regno);
7194 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
7199 /* Handle libcalls, which don't provide a type node. */
7200 if (valtype == NULL)
7214 regno = FIRST_SSE_REG;
7218 regno = FIRST_FLOAT_REG;
7226 return gen_rtx_REG (mode, regno);
7228 else if (POINTER_TYPE_P (valtype))
7230 /* Pointers are always returned in Pmode. */
7234 ret = construct_container (mode, orig_mode, valtype, 1,
7235 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
7236 x86_64_int_return_registers, 0);
7238 /* For zero sized structures, construct_container returns NULL, but we
7239 need to keep the rest of the compiler happy by returning a meaningful value. */
7241 ret = gen_rtx_REG (orig_mode, AX_REG);
7247 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
7249 unsigned int regno = AX_REG;
7253 switch (GET_MODE_SIZE (mode))
7256 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7257 && !COMPLEX_MODE_P (mode))
7258 regno = FIRST_SSE_REG;
7262 if (mode == SFmode || mode == DFmode)
7263 regno = FIRST_SSE_REG;
7269 return gen_rtx_REG (orig_mode, regno);
7273 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
7274 enum machine_mode orig_mode, enum machine_mode mode)
7276 const_tree fn, fntype;
7279 if (fntype_or_decl && DECL_P (fntype_or_decl))
7280 fn = fntype_or_decl;
7281 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
7283 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
7284 return function_value_ms_64 (orig_mode, mode);
7285 else if (TARGET_64BIT)
7286 return function_value_64 (orig_mode, mode, valtype);
7288 return function_value_32 (orig_mode, mode, fntype, fn);
7292 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
7293 bool outgoing ATTRIBUTE_UNUSED)
7295 enum machine_mode mode, orig_mode;
7297 orig_mode = TYPE_MODE (valtype);
7298 mode = type_natural_mode (valtype, NULL);
7299 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
7302 /* Pointer function arguments and return values are promoted to Pmode. */
7304 static enum machine_mode
7305 ix86_promote_function_mode (const_tree type, enum machine_mode mode,
7306 int *punsignedp, const_tree fntype,
7309 if (type != NULL_TREE && POINTER_TYPE_P (type))
7311 *punsignedp = POINTERS_EXTEND_UNSIGNED;
7314 return default_promote_function_mode (type, mode, punsignedp, fntype,
7319 ix86_libcall_value (enum machine_mode mode)
7321 return ix86_function_value_1 (NULL, NULL, mode, mode);
7324 /* Return true iff type is returned in memory. */
7326 static bool ATTRIBUTE_UNUSED
7327 return_in_memory_32 (const_tree type, enum machine_mode mode)
7331 if (mode == BLKmode)
7334 size = int_size_in_bytes (type);
7336 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
7339 if (VECTOR_MODE_P (mode) || mode == TImode)
7341 /* User-created vectors small enough to fit in EAX. */
7345 /* MMX/3dNow values are returned in MM0,
7346 except when it doesn't exist or the ABI prescribes otherwise. */
7348 return !TARGET_MMX || TARGET_VECT8_RETURNS;
7350 /* SSE values are returned in XMM0, except when it doesn't exist. */
7354 /* AVX values are returned in YMM0, except when it doesn't exist. */
7365 /* OImode shouldn't be used directly. */
7366 gcc_assert (mode != OImode);
7371 static bool ATTRIBUTE_UNUSED
7372 return_in_memory_64 (const_tree type, enum machine_mode mode)
7374 int needed_intregs, needed_sseregs;
7375 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
7378 static bool ATTRIBUTE_UNUSED
7379 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
7381 HOST_WIDE_INT size = int_size_in_bytes (type);
7383 /* __m128 is returned in xmm0. */
7384 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7385 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
7388 /* Otherwise, the size must be exactly 1, 2, 4, or 8 bytes. */
7389 return size != 1 && size != 2 && size != 4 && size != 8;
7393 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7395 #ifdef SUBTARGET_RETURN_IN_MEMORY
7396 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
7398 const enum machine_mode mode = type_natural_mode (type, NULL);
7402 if (ix86_function_type_abi (fntype) == MS_ABI)
7403 return return_in_memory_ms_64 (type, mode);
7405 return return_in_memory_64 (type, mode);
7408 return return_in_memory_32 (type, mode);
7412 /* When returning SSE vector types, we have a choice of either
7413 (1) being abi incompatible with a -march switch, or
7414 (2) generating an error.
7415 Given no good solution, I think the safest thing is one warning.
7416 The user won't be able to use -Werror, but....
7418 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
7419 called in response to actually generating a caller or callee that
7420 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
7421 via aggregate_value_p for general type probing from tree-ssa. */
7424 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
7426 static bool warnedsse, warnedmmx;
7428 if (!TARGET_64BIT && type)
7430 /* Look at the return type of the function, not the function type. */
7431 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
7433 if (!TARGET_SSE && !warnedsse)
7436 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7439 warning (0, "SSE vector return without SSE enabled "
7444 if (!TARGET_MMX && !warnedmmx)
7446 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7449 warning (0, "MMX vector return without MMX enabled "
7459 /* Create the va_list data type. */
7461 /* Returns the calling-convention-specific va_list data type.
7462 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
7465 ix86_build_builtin_va_list_abi (enum calling_abi abi)
7467 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
7469 /* For i386 we use a plain pointer to the argument area. */
7470 if (!TARGET_64BIT || abi == MS_ABI)
7471 return build_pointer_type (char_type_node);
7473 record = lang_hooks.types.make_type (RECORD_TYPE);
7474 type_decl = build_decl (BUILTINS_LOCATION,
7475 TYPE_DECL, get_identifier ("__va_list_tag"), record);
7477 f_gpr = build_decl (BUILTINS_LOCATION,
7478 FIELD_DECL, get_identifier ("gp_offset"),
7479 unsigned_type_node);
7480 f_fpr = build_decl (BUILTINS_LOCATION,
7481 FIELD_DECL, get_identifier ("fp_offset"),
7482 unsigned_type_node);
7483 f_ovf = build_decl (BUILTINS_LOCATION,
7484 FIELD_DECL, get_identifier ("overflow_arg_area"),
7486 f_sav = build_decl (BUILTINS_LOCATION,
7487 FIELD_DECL, get_identifier ("reg_save_area"),
7490 va_list_gpr_counter_field = f_gpr;
7491 va_list_fpr_counter_field = f_fpr;
7493 DECL_FIELD_CONTEXT (f_gpr) = record;
7494 DECL_FIELD_CONTEXT (f_fpr) = record;
7495 DECL_FIELD_CONTEXT (f_ovf) = record;
7496 DECL_FIELD_CONTEXT (f_sav) = record;
7498 TYPE_STUB_DECL (record) = type_decl;
7499 TYPE_NAME (record) = type_decl;
7500 TYPE_FIELDS (record) = f_gpr;
7501 DECL_CHAIN (f_gpr) = f_fpr;
7502 DECL_CHAIN (f_fpr) = f_ovf;
7503 DECL_CHAIN (f_ovf) = f_sav;
7505 layout_type (record);
7507 /* The correct type is an array type of one element. */
7508 return build_array_type (record, build_index_type (size_zero_node));
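/* For reference, not part of the original source: the record built
   above matches the SysV x86-64 psABI va_list layout, i.e.

     typedef struct __va_list_tag {
       unsigned int gp_offset;    // byte offset of the next GP arg in reg_save_area
       unsigned int fp_offset;    // byte offset of the next FP arg in reg_save_area
       void *overflow_arg_area;   // next stack-passed argument
       void *reg_save_area;       // base of the register save area
     } __builtin_va_list[1];      // a one-element array, as returned above  */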
7511 /* Set up the builtin va_list data type and, for 64-bit, the additional
7512 calling-convention-specific va_list data types. */
7515 ix86_build_builtin_va_list (void)
7517 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
7519 /* Initialize the ABI-specific va_list builtin types. */
7523 if (ix86_abi == MS_ABI)
7525 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
7526 if (TREE_CODE (t) != RECORD_TYPE)
7527 t = build_variant_type_copy (t);
7528 sysv_va_list_type_node = t;
7533 if (TREE_CODE (t) != RECORD_TYPE)
7534 t = build_variant_type_copy (t);
7535 sysv_va_list_type_node = t;
7537 if (ix86_abi != MS_ABI)
7539 t = ix86_build_builtin_va_list_abi (MS_ABI);
7540 if (TREE_CODE (t) != RECORD_TYPE)
7541 t = build_variant_type_copy (t);
7542 ms_va_list_type_node = t;
7547 if (TREE_CODE (t) != RECORD_TYPE)
7548 t = build_variant_type_copy (t);
7549 ms_va_list_type_node = t;
7556 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
7559 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
7565 /* GPR size of varargs save area. */
7566 if (cfun->va_list_gpr_size)
7567 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
7569 ix86_varargs_gpr_size = 0;
7571 /* FPR size of varargs save area. We don't need it if we don't pass
7572 anything in SSE registers. */
7573 if (TARGET_SSE && cfun->va_list_fpr_size)
7574 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
7576 ix86_varargs_fpr_size = 0;
7578 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
7581 save_area = frame_pointer_rtx;
7582 set = get_varargs_alias_set ();
7584 max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
7585 if (max > X86_64_REGPARM_MAX)
7586 max = X86_64_REGPARM_MAX;
7588 for (i = cum->regno; i < max; i++)
7590 mem = gen_rtx_MEM (Pmode,
7591 plus_constant (save_area, i * UNITS_PER_WORD));
7592 MEM_NOTRAP_P (mem) = 1;
7593 set_mem_alias_set (mem, set);
7594 emit_move_insn (mem, gen_rtx_REG (Pmode,
7595 x86_64_int_parameter_registers[i]));
7598 if (ix86_varargs_fpr_size)
7600 enum machine_mode smode;
7603 /* Now emit code to save SSE registers. The AX parameter contains the
7604 number of SSE parameter registers used to call this function, though all
7605 we actually check here is the zero/non-zero status. */
7607 label = gen_label_rtx ();
7608 test = gen_rtx_EQ (VOIDmode, gen_rtx_REG (QImode, AX_REG), const0_rtx);
7609 emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
7612 /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
7613 we used movdqa (i.e. TImode) instead? Perhaps even better would
7614 be if we could determine the real mode of the data, via a hook
7615 into pass_stdarg. Ignore all that for now. */
7617 if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
7618 crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);
7620 max = cum->sse_regno + cfun->va_list_fpr_size / 16;
7621 if (max > X86_64_SSE_REGPARM_MAX)
7622 max = X86_64_SSE_REGPARM_MAX;
7624 for (i = cum->sse_regno; i < max; ++i)
7626 mem = plus_constant (save_area, i * 16 + ix86_varargs_gpr_size);
7627 mem = gen_rtx_MEM (smode, mem);
7628 MEM_NOTRAP_P (mem) = 1;
7629 set_mem_alias_set (mem, set);
7630 set_mem_align (mem, GET_MODE_ALIGNMENT (smode));
7632 emit_move_insn (mem, gen_rtx_REG (smode, SSE_REGNO (i)));
7640 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
7642 alias_set_type set = get_varargs_alias_set ();
7645 /* Reset to zero, as there might be a sysv vaarg used
7647 ix86_varargs_gpr_size = 0;
7648 ix86_varargs_fpr_size = 0;
7650 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
7654 mem = gen_rtx_MEM (Pmode,
7655 plus_constant (virtual_incoming_args_rtx,
7656 i * UNITS_PER_WORD));
7657 MEM_NOTRAP_P (mem) = 1;
7658 set_mem_alias_set (mem, set);
7660 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
7661 emit_move_insn (mem, reg);
7666 ix86_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode,
7667 tree type, int *pretend_size ATTRIBUTE_UNUSED,
7670 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7671 CUMULATIVE_ARGS next_cum;
7674 /* This argument doesn't appear to be used anymore. Which is good,
7675 because the old code here didn't suppress rtl generation. */
7676 gcc_assert (!no_rtl);
7681 fntype = TREE_TYPE (current_function_decl);
7683 /* For varargs, we do not want to skip the dummy va_dcl argument.
7684 For stdargs, we do want to skip the last named argument. */
7686 if (stdarg_p (fntype))
7687 ix86_function_arg_advance (pack_cumulative_args (&next_cum), mode, type,
7690 if (cum->call_abi == MS_ABI)
7691 setup_incoming_varargs_ms_64 (&next_cum);
7693 setup_incoming_varargs_64 (&next_cum);
7696 /* Check whether TYPE is a char * kind of va_list. */
7699 is_va_list_char_pointer (tree type)
7703 /* For 32-bit it is always true. */
7706 canonic = ix86_canonical_va_list_type (type);
7707 return (canonic == ms_va_list_type_node
7708 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
7711 /* Implement va_start. */
7714 ix86_va_start (tree valist, rtx nextarg)
7716 HOST_WIDE_INT words, n_gpr, n_fpr;
7717 tree f_gpr, f_fpr, f_ovf, f_sav;
7718 tree gpr, fpr, ovf, sav, t;
7722 if (flag_split_stack
7723 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7725 unsigned int scratch_regno;
7727 /* When we are splitting the stack, we can't refer to the stack
7728 arguments using internal_arg_pointer, because they may be on
7729 the old stack. The split stack prologue will arrange to
7730 leave a pointer to the old stack arguments in a scratch
7731 register, which we here copy to a pseudo-register. The split
7732 stack prologue can't set the pseudo-register directly because
7733 it (the prologue) runs before any registers have been saved. */
7735 scratch_regno = split_stack_prologue_scratch_regno ();
7736 if (scratch_regno != INVALID_REGNUM)
7740 reg = gen_reg_rtx (Pmode);
7741 cfun->machine->split_stack_varargs_pointer = reg;
7744 emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
7748 push_topmost_sequence ();
7749 emit_insn_after (seq, entry_of_function ());
7750 pop_topmost_sequence ();
7754 /* Only the 64-bit target needs something special. */
7755 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7757 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7758 std_expand_builtin_va_start (valist, nextarg);
7763 va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
7764 next = expand_binop (ptr_mode, add_optab,
7765 cfun->machine->split_stack_varargs_pointer,
7766 crtl->args.arg_offset_rtx,
7767 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7768 convert_move (va_r, next, 0);
7773 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7774 f_fpr = DECL_CHAIN (f_gpr);
7775 f_ovf = DECL_CHAIN (f_fpr);
7776 f_sav = DECL_CHAIN (f_ovf);
7778 valist = build_simple_mem_ref (valist);
7779 TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
7780 /* The following should be folded into the MEM_REF offset. */
7781 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
7783 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
7785 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
7787 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
7790 /* Count number of gp and fp argument registers used. */
7791 words = crtl->args.info.words;
7792 n_gpr = crtl->args.info.regno;
7793 n_fpr = crtl->args.info.sse_regno;
7795 if (cfun->va_list_gpr_size)
7797 type = TREE_TYPE (gpr);
7798 t = build2 (MODIFY_EXPR, type,
7799 gpr, build_int_cst (type, n_gpr * 8));
7800 TREE_SIDE_EFFECTS (t) = 1;
7801 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7804 if (TARGET_SSE && cfun->va_list_fpr_size)
7806 type = TREE_TYPE (fpr);
7807 t = build2 (MODIFY_EXPR, type, fpr,
7808 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
7809 TREE_SIDE_EFFECTS (t) = 1;
7810 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7813 /* Find the overflow area. */
7814 type = TREE_TYPE (ovf);
7815 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7816 ovf_rtx = crtl->args.internal_arg_pointer;
7818 ovf_rtx = cfun->machine->split_stack_varargs_pointer;
7819 t = make_tree (type, ovf_rtx);
7821 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
7822 t = build2 (MODIFY_EXPR, type, ovf, t);
7823 TREE_SIDE_EFFECTS (t) = 1;
7824 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7826 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
7828 /* Find the register save area.
7829 The function prologue saves it right above the stack frame. */
7830 type = TREE_TYPE (sav);
7831 t = make_tree (type, frame_pointer_rtx);
7832 if (!ix86_varargs_gpr_size)
7833 t = fold_build_pointer_plus_hwi (t, -8 * X86_64_REGPARM_MAX);
7834 t = build2 (MODIFY_EXPR, type, sav, t);
7835 TREE_SIDE_EFFECTS (t) = 1;
7836 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
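/* Illustrative example, not part of the original source, of the
   offsets stored above:

     void f (int a, double b, ...);
     // On entry a occupied %rdi and b %xmm0, so va_start sets
     //   gp_offset = 1 * 8 = 8
     //   fp_offset = 1 * 16 + 8 * X86_64_REGPARM_MAX = 16 + 48 = 64
     // and overflow_arg_area points just past the named stack words.  */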
7840 /* Implement va_arg. */
7843 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7846 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7847 tree f_gpr, f_fpr, f_ovf, f_sav;
7848 tree gpr, fpr, ovf, sav, t;
7850 tree lab_false, lab_over = NULL_TREE;
7855 enum machine_mode nat_mode;
7856 unsigned int arg_boundary;
7858 /* Only the 64-bit target needs something special. */
7859 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7860 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7862 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7863 f_fpr = DECL_CHAIN (f_gpr);
7864 f_ovf = DECL_CHAIN (f_fpr);
7865 f_sav = DECL_CHAIN (f_ovf);
7867 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7868 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7869 valist = build_va_arg_indirect_ref (valist);
7870 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7871 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7872 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7874 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7876 type = build_pointer_type (type);
7877 size = int_size_in_bytes (type);
7878 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7880 nat_mode = type_natural_mode (type, NULL);
7889 /* Unnamed 256bit vector mode parameters are passed on stack. */
7890 if (!TARGET_64BIT_MS_ABI)
7897 container = construct_container (nat_mode, TYPE_MODE (type),
7898 type, 0, X86_64_REGPARM_MAX,
7899 X86_64_SSE_REGPARM_MAX, intreg,
7904 /* Pull the value out of the saved registers. */
7906 addr = create_tmp_var (ptr_type_node, "addr");
7910 int needed_intregs, needed_sseregs;
7912 tree int_addr, sse_addr;
7914 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7915 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7917 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7919 need_temp = (!REG_P (container)
7920 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7921 || TYPE_ALIGN (type) > 128));
7923 /* In case we are passing a structure, verify that it is a consecutive block
7924 in the register save area. If not, we need to do moves. */
7925 if (!need_temp && !REG_P (container))
7927 /* Verify that all registers are strictly consecutive. */
7928 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7932 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7934 rtx slot = XVECEXP (container, 0, i);
7935 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7936 || INTVAL (XEXP (slot, 1)) != i * 16)
7944 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7946 rtx slot = XVECEXP (container, 0, i);
7947 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7948 || INTVAL (XEXP (slot, 1)) != i * 8)
7960 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7961 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7964 /* First ensure that we fit completely in registers. */
7967 t = build_int_cst (TREE_TYPE (gpr),
7968 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7969 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7970 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7971 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7972 gimplify_and_add (t, pre_p);
7976 t = build_int_cst (TREE_TYPE (fpr),
7977 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7978 + X86_64_REGPARM_MAX * 8);
7979 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7980 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7981 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7982 gimplify_and_add (t, pre_p);
7985 /* Compute index to start of area used for integer regs. */
7988 /* int_addr = gpr + sav; */
7989 t = fold_build_pointer_plus (sav, gpr);
7990 gimplify_assign (int_addr, t, pre_p);
7994 /* sse_addr = fpr + sav; */
7995 t = fold_build_pointer_plus (sav, fpr);
7996 gimplify_assign (sse_addr, t, pre_p);
8000 int i, prev_size = 0;
8001 tree temp = create_tmp_var (type, "va_arg_tmp");
8004 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
8005 gimplify_assign (addr, t, pre_p);
8007 for (i = 0; i < XVECLEN (container, 0); i++)
8009 rtx slot = XVECEXP (container, 0, i);
8010 rtx reg = XEXP (slot, 0);
8011 enum machine_mode mode = GET_MODE (reg);
8017 tree dest_addr, dest;
8018 int cur_size = GET_MODE_SIZE (mode);
8020 gcc_assert (prev_size <= INTVAL (XEXP (slot, 1)));
8021 prev_size = INTVAL (XEXP (slot, 1));
8022 if (prev_size + cur_size > size)
8024 cur_size = size - prev_size;
8025 mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
8026 if (mode == BLKmode)
8029 piece_type = lang_hooks.types.type_for_mode (mode, 1);
8030 if (mode == GET_MODE (reg))
8031 addr_type = build_pointer_type (piece_type);
8033 addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8035 daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
8038 if (SSE_REGNO_P (REGNO (reg)))
8040 src_addr = sse_addr;
8041 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
8045 src_addr = int_addr;
8046 src_offset = REGNO (reg) * 8;
8048 src_addr = fold_convert (addr_type, src_addr);
8049 src_addr = fold_build_pointer_plus_hwi (src_addr, src_offset);
8051 dest_addr = fold_convert (daddr_type, addr);
8052 dest_addr = fold_build_pointer_plus_hwi (dest_addr, prev_size);
8053 if (cur_size == GET_MODE_SIZE (mode))
8055 src = build_va_arg_indirect_ref (src_addr);
8056 dest = build_va_arg_indirect_ref (dest_addr);
8058 gimplify_assign (dest, src, pre_p);
8063 = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
8064 3, dest_addr, src_addr,
8065 size_int (cur_size));
8066 gimplify_and_add (copy, pre_p);
8068 prev_size += cur_size;
8074 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
8075 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
8076 gimplify_assign (gpr, t, pre_p);
8081 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
8082 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
8083 gimplify_assign (fpr, t, pre_p);
8086 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
8088 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
8091 /* ... otherwise out of the overflow area. */
8093 /* When the caller aligns a parameter on the stack, a parameter whose
8094 alignment exceeds MAX_SUPPORTED_STACK_ALIGNMENT is aligned at
8095 MAX_SUPPORTED_STACK_ALIGNMENT.  Match the callee here with the
8096 caller.  */
8097 arg_boundary = ix86_function_arg_boundary (VOIDmode, type);
8098 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
8099 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
8101 /* Care for on-stack alignment if needed. */
8102 if (arg_boundary <= 64 || size == 0)
8106 HOST_WIDE_INT align = arg_boundary / 8;
8107 t = fold_build_pointer_plus_hwi (ovf, align - 1);
8108 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
8109 build_int_cst (TREE_TYPE (t), -align));
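/* A worked example of the round-up above: with arg_boundary == 128 bits,
   align == 16, and an overflow-area pointer of 0x1004 (illustrative),
   (0x1004 + 15) & -16 == 0x1010, the next 16-byte boundary.  */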
8112 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
8113 gimplify_assign (addr, t, pre_p);
8115 t = fold_build_pointer_plus_hwi (t, rsize * UNITS_PER_WORD);
8116 gimplify_assign (unshare_expr (ovf), t, pre_p);
8119 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
8121 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
8122 addr = fold_convert (ptrtype, addr);
8125 addr = build_va_arg_indirect_ref (addr);
8126 return build_va_arg_indirect_ref (addr);
8129 /* Return true if OPNUM's MEM should be matched
8130 in movabs* patterns. */
8133 ix86_check_movabs (rtx insn, int opnum)
8137 set = PATTERN (insn);
8138 if (GET_CODE (set) == PARALLEL)
8139 set = XVECEXP (set, 0, 0);
8140 gcc_assert (GET_CODE (set) == SET);
8141 mem = XEXP (set, opnum);
8142 while (GET_CODE (mem) == SUBREG)
8143 mem = SUBREG_REG (mem);
8144 gcc_assert (MEM_P (mem));
8145 return volatile_ok || !MEM_VOLATILE_P (mem);
8148 /* Initialize the table of extra 80387 mathematical constants. */
8151 init_ext_80387_constants (void)
8153 static const char * cst[5] =
8155 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
8156 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
8157 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
8158 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
8159 "3.1415926535897932385128089594061862044", /* 4: fldpi */
8163 for (i = 0; i < 5; i++)
8165 real_from_string (&ext_80387_constants_table[i], cst[i]);
8166 /* Ensure each constant is rounded to XFmode precision. */
8167 real_convert (&ext_80387_constants_table[i],
8168 XFmode, &ext_80387_constants_table[i]);
8171 ext_80387_constants_init = 1;
8174 /* Return non-zero if the constant is something that
8175 can be loaded with a special instruction. */
8178 standard_80387_constant_p (rtx x)
8180 enum machine_mode mode = GET_MODE (x);
8184 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
8187 if (x == CONST0_RTX (mode))
8189 if (x == CONST1_RTX (mode))
8192 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8194 /* For XFmode constants, try to find a special 80387 instruction when
8195 optimizing for size or on those CPUs that benefit from them. */
8197 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
8201 if (! ext_80387_constants_init)
8202 init_ext_80387_constants ();
8204 for (i = 0; i < 5; i++)
8205 if (real_identical (&r, &ext_80387_constants_table[i]))
8209 /* A load of the constant -0.0 or -1.0 will be split into an
8210 fldz;fchs or fld1;fchs sequence.  */
8211 if (real_isnegzero (&r))
8213 if (real_identical (&r, &dconstm1))
8219 /* Return the opcode of the special instruction to be used to load the constant X.  */
8223 standard_80387_constant_opcode (rtx x)
8225 switch (standard_80387_constant_p (x))
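/* The cases here mirror the classification above: values 1 and 2 load
   0.0 and 1.0 via fldz and fld1, values 3 through 7 map to the table
   constants fldlg2, fldln2, fldl2e, fldl2t and fldpi, and the -0.0/-1.0
   classes emit a pattern that is later split into fldz;fchs or
   fld1;fchs, as noted above.  */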
8249 /* Return the CONST_DOUBLE representing the 80387 constant that is
8250 loaded by the specified special instruction. The argument IDX
8251 matches the return value from standard_80387_constant_p. */
8254 standard_80387_constant_rtx (int idx)
8258 if (! ext_80387_constants_init)
8259 init_ext_80387_constants ();
8275 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
8279 /* Return 1 if X is all 0s and 2 if X is all 1s
8280 in a supported SSE/AVX vector mode.  */
8283 standard_sse_constant_p (rtx x)
8285 enum machine_mode mode = GET_MODE (x);
8287 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
8289 if (vector_all_ones_operand (x, mode))
8311 /* Return the opcode of the special instruction to be used to load the SSE/AVX constant X.  */
8315 standard_sse_constant_opcode (rtx insn, rtx x)
8317 switch (standard_sse_constant_p (x))
8320 switch (get_attr_mode (insn))
8323 if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8324 return "%vpxor\t%0, %d0";
8326 if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8327 return "%vxorpd\t%0, %d0";
8329 return "%vxorps\t%0, %d0";
8332 if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8333 return "vpxor\t%x0, %x0, %x0";
8335 if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8336 return "vxorpd\t%x0, %x0, %x0";
8338 return "vxorps\t%x0, %x0, %x0";
8346 return "vpcmpeqd\t%0, %0, %0";
8348 return "pcmpeqd\t%0, %0";
8356 /* Returns true if OP contains a symbol reference.  */
8359 symbolic_reference_mentioned_p (rtx op)
8364 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
8367 fmt = GET_RTX_FORMAT (GET_CODE (op));
8368 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
8374 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
8375 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
8379 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
8386 /* Return true if it is appropriate to emit `ret' instructions in the
8387 body of a function. Do this only if the epilogue is simple, needing a
8388 couple of insns. Prior to reloading, we can't tell how many registers
8389 must be saved, so return false then. Return false if there is no frame
8390 marker to de-allocate. */
8393 ix86_can_use_return_insn_p (void)
8395 struct ix86_frame frame;
8397 if (! reload_completed || frame_pointer_needed)
8400 /* Don't allow more than 32k pop, since that's all we can do
8401 with one instruction. */
8402 if (crtl->args.pops_args && crtl->args.size >= 32768)
8405 ix86_compute_frame_layout (&frame);
8406 return (frame.stack_pointer_offset == UNITS_PER_WORD
8407 && (frame.nregs + frame.nsseregs) == 0);
8410 /* Value should be nonzero if functions must have frame pointers.
8411 Zero means the frame pointer need not be set up (and parms may
8412 be accessed via the stack pointer) in functions that seem suitable. */
8415 ix86_frame_pointer_required (void)
8417 /* If we accessed previous frames, then the generated code expects
8418 to be able to access the saved ebp value in our frame. */
8419 if (cfun->machine->accesses_prev_frame)
8422 /* Several x86 OSes need a frame pointer for other reasons,
8423 usually pertaining to setjmp. */
8424 if (SUBTARGET_FRAME_POINTER_REQUIRED)
8427 /* For older 32-bit runtimes setjmp requires a valid frame pointer.  */
8428 if (TARGET_32BIT_MS_ABI && cfun->calls_setjmp)
8431 /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
8432 turns off the frame pointer by default. Turn it back on now if
8433 we've not got a leaf function. */
8434 if (TARGET_OMIT_LEAF_FRAME_POINTER
8435 && (!current_function_is_leaf
8436 || ix86_current_function_calls_tls_descriptor))
8439 if (crtl->profile && !flag_fentry)
8445 /* Record that the current function accesses previous call frames. */
8448 ix86_setup_frame_addresses (void)
8450 cfun->machine->accesses_prev_frame = 1;
8453 #ifndef USE_HIDDEN_LINKONCE
8454 # if defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)
8455 # define USE_HIDDEN_LINKONCE 1
8457 # define USE_HIDDEN_LINKONCE 0
8461 static int pic_labels_used;
8463 /* Fills in the label name that should be used for a pc thunk for
8464 the given register. */
8467 get_pc_thunk_name (char name[32], unsigned int regno)
8469 gcc_assert (!TARGET_64BIT);
8471 if (USE_HIDDEN_LINKONCE)
8472 sprintf (name, "__x86.get_pc_thunk.%s", reg_names[regno]);
8474 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
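/* Example: for regno == BX_REG this produces "__x86.get_pc_thunk.bx"
   when hidden linkonce sections are usable, and otherwise an internal
   assembler-local label built from "LPR" and the register number
   (e.g. LPR3 on typical configurations).  */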
8478 /* This function generates the pc thunks used for -fpic: each thunk loads
8479 its register with the return address of the caller and then returns.  */
8482 ix86_code_end (void)
8487 for (regno = AX_REG; regno <= SP_REG; regno++)
8492 if (!(pic_labels_used & (1 << regno)))
8495 get_pc_thunk_name (name, regno);
8497 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
8498 get_identifier (name),
8499 build_function_type_list (void_type_node, NULL_TREE));
8500 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
8501 NULL_TREE, void_type_node);
8502 TREE_PUBLIC (decl) = 1;
8503 TREE_STATIC (decl) = 1;
8508 switch_to_section (darwin_sections[text_coal_section]);
8509 fputs ("\t.weak_definition\t", asm_out_file);
8510 assemble_name (asm_out_file, name);
8511 fputs ("\n\t.private_extern\t", asm_out_file);
8512 assemble_name (asm_out_file, name);
8513 putc ('\n', asm_out_file);
8514 ASM_OUTPUT_LABEL (asm_out_file, name);
8515 DECL_WEAK (decl) = 1;
8519 if (USE_HIDDEN_LINKONCE)
8521 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
8523 targetm.asm_out.unique_section (decl, 0);
8524 switch_to_section (get_named_section (decl, NULL, 0));
8526 targetm.asm_out.globalize_label (asm_out_file, name);
8527 fputs ("\t.hidden\t", asm_out_file);
8528 assemble_name (asm_out_file, name);
8529 putc ('\n', asm_out_file);
8530 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
8534 switch_to_section (text_section);
8535 ASM_OUTPUT_LABEL (asm_out_file, name);
8538 DECL_INITIAL (decl) = make_node (BLOCK);
8539 current_function_decl = decl;
8540 init_function_start (decl);
8541 first_function_block_is_cold = false;
8542 /* Make sure unwind info is emitted for the thunk if needed. */
8543 final_start_function (emit_barrier (), asm_out_file, 1);
8545 /* Pad stack IP move with 4 instructions (two NOPs count
8546 as one instruction). */
8547 if (TARGET_PAD_SHORT_FUNCTION)
8552 fputs ("\tnop\n", asm_out_file);
8555 xops[0] = gen_rtx_REG (Pmode, regno);
8556 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8557 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
8558 fputs ("\tret\n", asm_out_file);
8559 final_end_function ();
8560 init_insn_lengths ();
8561 free_after_compilation (cfun);
8563 current_function_decl = NULL;
8566 if (flag_split_stack)
8567 file_end_indicate_split_stack ();
8570 /* Emit code for the SET_GOT patterns. */
8573 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
8579 if (TARGET_VXWORKS_RTP && flag_pic)
8581 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
8582 xops[2] = gen_rtx_MEM (Pmode,
8583 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
8584 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
8586 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
8587 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
8588 an unadorned address. */
8589 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
8590 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
8591 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
8595 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
8599 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
8601 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
8604 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8605 is what will be referenced by the Mach-O PIC subsystem. */
8607 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8610 targetm.asm_out.internal_label (asm_out_file, "L",
8611 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
8616 get_pc_thunk_name (name, REGNO (dest));
8617 pic_labels_used |= 1 << REGNO (dest);
8619 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
8620 xops[2] = gen_rtx_MEM (QImode, xops[2]);
8621 output_asm_insn ("call\t%X2", xops);
8622 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8623 is what will be referenced by the Mach-O PIC subsystem. */
8626 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8628 targetm.asm_out.internal_label (asm_out_file, "L",
8629 CODE_LABEL_NUMBER (label));
8634 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
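/* Taken together, the usual ELF sequence emitted here is, e.g.:
       call __x86.get_pc_thunk.bx
       addl $_GLOBAL_OFFSET_TABLE_, %ebx
   the thunk leaves the pc in the register and the add rebases it to
   point at the GOT.  */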
8639 /* Generate a "push" pattern for input ARG.  */
8644 struct machine_function *m = cfun->machine;
8646 if (m->fs.cfa_reg == stack_pointer_rtx)
8647 m->fs.cfa_offset += UNITS_PER_WORD;
8648 m->fs.sp_offset += UNITS_PER_WORD;
8650 return gen_rtx_SET (VOIDmode,
8652 gen_rtx_PRE_DEC (Pmode,
8653 stack_pointer_rtx)),
8657 /* Generate a "pop" pattern for input ARG.  */
8662 return gen_rtx_SET (VOIDmode,
8665 gen_rtx_POST_INC (Pmode,
8666 stack_pointer_rtx)));
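/* Schematically, the patterns built by gen_push and gen_pop are
       (set (mem (pre_dec sp)) arg)    and
       (set arg (mem (post_inc sp)))
   respectively, i.e. the canonical x86 push and pop forms.  */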
8669 /* Return >= 0 if there is an unused call-clobbered register available
8670 for the entire function. */
8673 ix86_select_alt_pic_regnum (void)
8675 if (current_function_is_leaf
8677 && !ix86_current_function_calls_tls_descriptor)
8680 /* Can't use the same register for both PIC and DRAP. */
8682 drap = REGNO (crtl->drap_reg);
8685 for (i = 2; i >= 0; --i)
8686 if (i != drap && !df_regs_ever_live_p (i))
8690 return INVALID_REGNUM;
8693 /* Return TRUE if we need to save REGNO. */
8696 ix86_save_reg (unsigned int regno, bool maybe_eh_return)
8698 if (pic_offset_table_rtx
8699 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
8700 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8702 || crtl->calls_eh_return
8703 || crtl->uses_const_pool))
8704 return ix86_select_alt_pic_regnum () == INVALID_REGNUM;
8706 if (crtl->calls_eh_return && maybe_eh_return)
8711 unsigned test = EH_RETURN_DATA_REGNO (i);
8712 if (test == INVALID_REGNUM)
8719 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
8722 return (df_regs_ever_live_p (regno)
8723 && !call_used_regs[regno]
8724 && !fixed_regs[regno]
8725 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
8728 /* Return the number of saved general purpose registers.  */
8731 ix86_nsaved_regs (void)
8736 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8737 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8742 /* Return the number of saved SSE registers.  */
8745 ix86_nsaved_sseregs (void)
8750 if (!TARGET_64BIT_MS_ABI)
8752 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8753 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8758 /* Given FROM and TO register numbers, say whether this elimination is
8759 allowed. If stack alignment is needed, we can only replace argument
8760 pointer with hard frame pointer, or replace frame pointer with stack
8761 pointer. Otherwise, frame pointer elimination is automatically
8762 handled and all other eliminations are valid. */
8765 ix86_can_eliminate (const int from, const int to)
8767 if (stack_realign_fp)
8768 return ((from == ARG_POINTER_REGNUM
8769 && to == HARD_FRAME_POINTER_REGNUM)
8770 || (from == FRAME_POINTER_REGNUM
8771 && to == STACK_POINTER_REGNUM));
8773 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
8776 /* Return the offset between two registers, one to be eliminated, and the other
8777 its replacement, at the start of a routine. */
8780 ix86_initial_elimination_offset (int from, int to)
8782 struct ix86_frame frame;
8783 ix86_compute_frame_layout (&frame);
8785 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8786 return frame.hard_frame_pointer_offset;
8787 else if (from == FRAME_POINTER_REGNUM
8788 && to == HARD_FRAME_POINTER_REGNUM)
8789 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
8792 gcc_assert (to == STACK_POINTER_REGNUM);
8794 if (from == ARG_POINTER_REGNUM)
8795 return frame.stack_pointer_offset;
8797 gcc_assert (from == FRAME_POINTER_REGNUM);
8798 return frame.stack_pointer_offset - frame.frame_pointer_offset;
8802 /* In a dynamically-aligned function, we can't know the offset from
8803 stack pointer to frame pointer, so we must ensure that setjmp
8804 eliminates fp against the hard fp (%ebp) rather than trying to
8805 index from %esp up to the top of the frame across a gap that is
8806 of unknown (at compile-time) size. */
8808 ix86_builtin_setjmp_frame_value (void)
8810 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
8813 /* When using -fsplit-stack, the allocation routines set a field in
8814 the TCB to the bottom of the stack plus this much space, measured in bytes.  */
8817 #define SPLIT_STACK_AVAILABLE 256
8819 /* Fill the ix86_frame structure FRAME with the frame layout of the function being compiled.  */
8822 ix86_compute_frame_layout (struct ix86_frame *frame)
8824 unsigned int stack_alignment_needed;
8825 HOST_WIDE_INT offset;
8826 unsigned int preferred_alignment;
8827 HOST_WIDE_INT size = get_frame_size ();
8828 HOST_WIDE_INT to_allocate;
8830 frame->nregs = ix86_nsaved_regs ();
8831 frame->nsseregs = ix86_nsaved_sseregs ();
8833 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
8834 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
8836 /* The 64-bit MS ABI seems to require the stack alignment to always be 16,
8837 except for function prologues and leaf functions.  */
8838 if ((TARGET_64BIT_MS_ABI && preferred_alignment < 16)
8839 && (!current_function_is_leaf || cfun->calls_alloca != 0
8840 || ix86_current_function_calls_tls_descriptor))
8842 preferred_alignment = 16;
8843 stack_alignment_needed = 16;
8844 crtl->preferred_stack_boundary = 128;
8845 crtl->stack_alignment_needed = 128;
8848 gcc_assert (!size || stack_alignment_needed);
8849 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8850 gcc_assert (preferred_alignment <= stack_alignment_needed);
8852 /* For SEH we have to limit the amount of code movement into the prologue.
8853 At present we do this via a BLOCKAGE, at which point there's very little
8854 scheduling that can be done, which means that there's very little point
8855 in doing anything except PUSHs. */
8857 cfun->machine->use_fast_prologue_epilogue = false;
8859 /* During the reload iteration the number of registers saved can change.
8860 Recompute the value as needed.  Do not recompute when the number of
8861 registers didn't change, as reload makes multiple calls to this function
8862 and does not expect the decision to change within a single iteration.  */
8863 else if (!optimize_function_for_size_p (cfun)
8864 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8866 int count = frame->nregs;
8867 struct cgraph_node *node = cgraph_get_node (current_function_decl);
8869 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8871 /* The fast prologue uses move instead of push to save registers. This
8872 is significantly longer, but also executes faster as modern hardware
8873 can execute the moves in parallel, but can't do that for push/pop.
8875 Be careful about choosing which prologue to emit: when the function
8876 takes many instructions to execute, we may use the slow version, and
8877 likewise when the function is known to be outside a hot spot (known
8878 with feedback only).  Weight the size of the function by the number of
8879 registers to save, as it is cheap to use one or two push instructions
8880 but very slow to use many of them.  */
8882 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8883 if (node->frequency < NODE_FREQUENCY_NORMAL
8884 || (flag_branch_probabilities
8885 && node->frequency < NODE_FREQUENCY_HOT))
8886 cfun->machine->use_fast_prologue_epilogue = false;
8888 cfun->machine->use_fast_prologue_epilogue
8889 = !expensive_function_p (count);
8892 frame->save_regs_using_mov
8893 = (TARGET_PROLOGUE_USING_MOVE && cfun->machine->use_fast_prologue_epilogue
8894 /* If static stack checking is enabled and done with probes,
8895 the registers need to be saved before allocating the frame. */
8896 && flag_stack_check != STATIC_BUILTIN_STACK_CHECK);
8898 /* Skip return address. */
8899 offset = UNITS_PER_WORD;
8901 /* Skip pushed static chain. */
8902 if (ix86_static_chain_on_stack)
8903 offset += UNITS_PER_WORD;
8905 /* Skip saved base pointer. */
8906 if (frame_pointer_needed)
8907 offset += UNITS_PER_WORD;
8908 frame->hfp_save_offset = offset;
8910 /* The traditional frame pointer location is at the top of the frame. */
8911 frame->hard_frame_pointer_offset = offset;
8913 /* Register save area */
8914 offset += frame->nregs * UNITS_PER_WORD;
8915 frame->reg_save_offset = offset;
8917 /* Align and set SSE register save area. */
8918 if (frame->nsseregs)
8920 /* The only ABI that has saved SSE registers (Win64) also has a
8921 16-byte aligned default stack, and thus we don't need to be
8922 within the re-aligned local stack frame to save them. */
8923 gcc_assert (INCOMING_STACK_BOUNDARY >= 128);
8924 offset = (offset + 16 - 1) & -16;
8925 offset += frame->nsseregs * 16;
8927 frame->sse_reg_save_offset = offset;
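/* E.g. an offset of 40 rounds up to 48 above ((40 + 15) & -16 == 48),
   and two saved SSE registers then advance it to 80; each register gets
   one aligned 16-byte slot.  */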
8929 /* The re-aligned stack starts here. Values before this point are not
8930 directly comparable with values below this point. In order to make
8931 sure that no value happens to be the same before and after, force
8932 the alignment computation below to add a non-zero value. */
8933 if (stack_realign_fp)
8934 offset = (offset + stack_alignment_needed) & -stack_alignment_needed;
8937 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8938 offset += frame->va_arg_size;
8940 /* Align start of frame for local function. */
8941 if (stack_realign_fp
8942 || offset != frame->sse_reg_save_offset
8944 || !current_function_is_leaf
8945 || cfun->calls_alloca
8946 || ix86_current_function_calls_tls_descriptor)
8947 offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;
8949 /* Frame pointer points here. */
8950 frame->frame_pointer_offset = offset;
8954 /* Add the outgoing arguments area.  It can be skipped if we eliminated
8955 all the function calls as dead code.
8956 Skipping is however impossible when the function calls alloca: the
8957 alloca expander assumes that the last crtl->outgoing_args_size bytes
8958 of the stack frame are unused.  */
8959 if (ACCUMULATE_OUTGOING_ARGS
8960 && (!current_function_is_leaf || cfun->calls_alloca
8961 || ix86_current_function_calls_tls_descriptor))
8963 offset += crtl->outgoing_args_size;
8964 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8967 frame->outgoing_arguments_size = 0;
8969 /* Align the stack boundary.  Only needed if we're calling another function or using alloca.  */
8971 if (!current_function_is_leaf || cfun->calls_alloca
8972 || ix86_current_function_calls_tls_descriptor)
8973 offset = (offset + preferred_alignment - 1) & -preferred_alignment;
8975 /* We've reached the end of the stack frame.  */
8976 frame->stack_pointer_offset = offset;
8978 /* Size prologue needs to allocate. */
8979 to_allocate = offset - frame->sse_reg_save_offset;
8981 if ((!to_allocate && frame->nregs <= 1)
8982 || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000))
8983 frame->save_regs_using_mov = false;
8985 if (ix86_using_red_zone ()
8986 && current_function_sp_is_unchanging
8987 && current_function_is_leaf
8988 && !ix86_current_function_calls_tls_descriptor)
8990 frame->red_zone_size = to_allocate;
8991 if (frame->save_regs_using_mov)
8992 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8993 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8994 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8997 frame->red_zone_size = 0;
8998 frame->stack_pointer_offset -= frame->red_zone_size;
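/* The cap above keeps RED_ZONE_SIZE - RED_ZONE_RESERVE bytes usable:
   with the 128-byte x86-64 red zone, everything but a small reserve
   kept back for temporaries may hold frame data and register saves.  */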
9000 /* The SEH frame pointer location is near the bottom of the frame.
9001 This is enforced by the fact that the difference between the
9002 stack pointer and the frame pointer is limited to 240 bytes in
9003 the unwind data structure. */
9008 /* If we can leave the frame pointer where it is, do so. */
9009 diff = frame->stack_pointer_offset - frame->hard_frame_pointer_offset;
9010 if (diff > 240 || (diff & 15) != 0)
9012 /* Ideally we'd determine what portion of the local stack frame
9013 (within the constraint of the lowest 240) is most heavily used.
9014 But without that complication, simply bias the frame pointer
9015 by 128 bytes so as to maximize the amount of the local stack
9016 frame that is addressable with 8-bit offsets. */
9017 frame->hard_frame_pointer_offset = frame->stack_pointer_offset - 128;
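/* In numbers: a ModRM disp8 covers [-128, 127].  Placing the frame
   pointer 128 bytes above the stack pointer makes bytes sp+0 through
   sp+255 (i.e. fp-128 through fp+127) reachable with 1-byte offsets,
   instead of only the 128 bytes below the frame pointer.  */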
9022 /* This is semi-inlined memory_address_length, but simplified
9023 since we know that we're always dealing with reg+offset, and
9024 to avoid having to create and discard all that rtl. */
9027 choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
9033 /* EBP and R13 cannot be encoded without an offset. */
9034 len = (regno == BP_REG || regno == R13_REG);
9036 else if (IN_RANGE (offset, -128, 127))
9039 /* ESP and R12 must be encoded with a SIB byte. */
9040 if (regno == SP_REG || regno == R12_REG)
9046 /* Return an RTX that points to CFA_OFFSET within the stack frame.
9047 The valid base registers are taken from CFUN->MACHINE->FS. */
9050 choose_baseaddr (HOST_WIDE_INT cfa_offset)
9052 const struct machine_function *m = cfun->machine;
9053 rtx base_reg = NULL;
9054 HOST_WIDE_INT base_offset = 0;
9056 if (m->use_fast_prologue_epilogue)
9058 /* Choose the base register most likely to allow the most scheduling
9059 opportunities.  Generally FP is valid throughout the function,
9060 while DRAP must be reloaded within the epilogue. But choose either
9061 over the SP due to increased encoding size. */
9065 base_reg = hard_frame_pointer_rtx;
9066 base_offset = m->fs.fp_offset - cfa_offset;
9068 else if (m->fs.drap_valid)
9070 base_reg = crtl->drap_reg;
9071 base_offset = 0 - cfa_offset;
9073 else if (m->fs.sp_valid)
9075 base_reg = stack_pointer_rtx;
9076 base_offset = m->fs.sp_offset - cfa_offset;
9081 HOST_WIDE_INT toffset;
9084 /* Choose the base register with the smallest address encoding.
9085 With a tie, choose FP > DRAP > SP. */
9088 base_reg = stack_pointer_rtx;
9089 base_offset = m->fs.sp_offset - cfa_offset;
9090 len = choose_baseaddr_len (STACK_POINTER_REGNUM, base_offset);
9092 if (m->fs.drap_valid)
9094 toffset = 0 - cfa_offset;
9095 tlen = choose_baseaddr_len (REGNO (crtl->drap_reg), toffset);
9098 base_reg = crtl->drap_reg;
9099 base_offset = toffset;
9105 toffset = m->fs.fp_offset - cfa_offset;
9106 tlen = choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM, toffset);
9109 base_reg = hard_frame_pointer_rtx;
9110 base_offset = toffset;
9115 gcc_assert (base_reg != NULL);
9117 return plus_constant (base_reg, base_offset);
9120 /* Emit code to save registers in the prologue. */
9123 ix86_emit_save_regs (void)
9128 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
9129 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9131 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
9132 RTX_FRAME_RELATED_P (insn) = 1;
9136 /* Emit a single register save at CFA - CFA_OFFSET. */
9139 ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
9140 HOST_WIDE_INT cfa_offset)
9142 struct machine_function *m = cfun->machine;
9143 rtx reg = gen_rtx_REG (mode, regno);
9144 rtx mem, addr, base, insn;
9146 addr = choose_baseaddr (cfa_offset);
9147 mem = gen_frame_mem (mode, addr);
9149 /* For SSE saves, we need to indicate the 128-bit alignment. */
9150 set_mem_align (mem, GET_MODE_ALIGNMENT (mode));
9152 insn = emit_move_insn (mem, reg);
9153 RTX_FRAME_RELATED_P (insn) = 1;
9156 if (GET_CODE (base) == PLUS)
9157 base = XEXP (base, 0);
9158 gcc_checking_assert (REG_P (base));
9160 /* When saving registers into a re-aligned local stack frame, avoid
9161 any tricky guessing by dwarf2out. */
9162 if (m->fs.realigned)
9164 gcc_checking_assert (stack_realign_drap);
9166 if (regno == REGNO (crtl->drap_reg))
9168 /* A bit of a hack. We force the DRAP register to be saved in
9169 the re-aligned stack frame, which provides us with a copy
9170 of the CFA that will last past the prologue. Install it. */
9171 gcc_checking_assert (cfun->machine->fs.fp_valid);
9172 addr = plus_constant (hard_frame_pointer_rtx,
9173 cfun->machine->fs.fp_offset - cfa_offset);
9174 mem = gen_rtx_MEM (mode, addr);
9175 add_reg_note (insn, REG_CFA_DEF_CFA, mem);
9179 /* The frame pointer is a stable reference within the
9180 aligned frame. Use it. */
9181 gcc_checking_assert (cfun->machine->fs.fp_valid);
9182 addr = plus_constant (hard_frame_pointer_rtx,
9183 cfun->machine->fs.fp_offset - cfa_offset);
9184 mem = gen_rtx_MEM (mode, addr);
9185 add_reg_note (insn, REG_CFA_EXPRESSION,
9186 gen_rtx_SET (VOIDmode, mem, reg));
9190 /* The memory may not be relative to the current CFA register,
9191 which means that we may need to generate a new pattern for
9192 use by the unwind info. */
9193 else if (base != m->fs.cfa_reg)
9195 addr = plus_constant (m->fs.cfa_reg, m->fs.cfa_offset - cfa_offset);
9196 mem = gen_rtx_MEM (mode, addr);
9197 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, mem, reg));
9201 /* Emit code to save registers using MOV insns.
9202 First register is stored at CFA - CFA_OFFSET. */
9204 ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
9208 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9209 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9211 ix86_emit_save_reg_using_mov (Pmode, regno, cfa_offset);
9212 cfa_offset -= UNITS_PER_WORD;
9216 /* Emit code to save SSE registers using MOV insns.
9217 First register is stored at CFA - CFA_OFFSET. */
9219 ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
9223 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9224 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9226 ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
9231 static GTY(()) rtx queued_cfa_restores;
9233 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next
9234 stack manipulation insn.  The value is on the stack at CFA - CFA_OFFSET.
9235 Don't add the note if the previously saved value will be left untouched
9236 within the stack red zone until return, as unwinders can find the same
9237 value in the register and on the stack.  */
9240 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT cfa_offset)
9242 if (!crtl->shrink_wrapped
9243 && cfa_offset <= cfun->machine->fs.red_zone_offset)
9248 add_reg_note (insn, REG_CFA_RESTORE, reg);
9249 RTX_FRAME_RELATED_P (insn) = 1;
9253 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
9256 /* Add queued REG_CFA_RESTORE notes, if any, to INSN.  */
9259 ix86_add_queued_cfa_restore_notes (rtx insn)
9262 if (!queued_cfa_restores)
9264 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
9266 XEXP (last, 1) = REG_NOTES (insn);
9267 REG_NOTES (insn) = queued_cfa_restores;
9268 queued_cfa_restores = NULL_RTX;
9269 RTX_FRAME_RELATED_P (insn) = 1;
9272 /* Expand prologue or epilogue stack adjustment.
9273 The pattern exists to put a dependency on all ebp-based memory accesses.
9274 STYLE should be negative if instructions should be marked as frame related,
9275 zero if the %r11 register is live and cannot be freely used, and positive otherwise.  */
9279 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
9280 int style, bool set_cfa)
9282 struct machine_function *m = cfun->machine;
9284 bool add_frame_related_expr = false;
9287 insn = gen_pro_epilogue_adjust_stack_si_add (dest, src, offset);
9288 else if (x86_64_immediate_operand (offset, DImode))
9289 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, offset);
9293 /* r11 is used by indirect sibcall return as well, set before the
9294 epilogue and used after the epilogue. */
9296 tmp = gen_rtx_REG (DImode, R11_REG);
9299 gcc_assert (src != hard_frame_pointer_rtx
9300 && dest != hard_frame_pointer_rtx);
9301 tmp = hard_frame_pointer_rtx;
9303 insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
9305 add_frame_related_expr = true;
9307 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, tmp);
9310 insn = emit_insn (insn);
9312 ix86_add_queued_cfa_restore_notes (insn);
9318 gcc_assert (m->fs.cfa_reg == src);
9319 m->fs.cfa_offset += INTVAL (offset);
9320 m->fs.cfa_reg = dest;
9322 r = gen_rtx_PLUS (Pmode, src, offset);
9323 r = gen_rtx_SET (VOIDmode, dest, r);
9324 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9325 RTX_FRAME_RELATED_P (insn) = 1;
9329 RTX_FRAME_RELATED_P (insn) = 1;
9330 if (add_frame_related_expr)
9332 rtx r = gen_rtx_PLUS (Pmode, src, offset);
9333 r = gen_rtx_SET (VOIDmode, dest, r);
9334 add_reg_note (insn, REG_FRAME_RELATED_EXPR, r);
9338 if (dest == stack_pointer_rtx)
9340 HOST_WIDE_INT ooffset = m->fs.sp_offset;
9341 bool valid = m->fs.sp_valid;
9343 if (src == hard_frame_pointer_rtx)
9345 valid = m->fs.fp_valid;
9346 ooffset = m->fs.fp_offset;
9348 else if (src == crtl->drap_reg)
9350 valid = m->fs.drap_valid;
9355 /* Else there are two possibilities: SP itself, which we set
9356 up as the default above. Or EH_RETURN_STACKADJ_RTX, which is
9357 taken care of by hand along the eh_return path.  */
9358 gcc_checking_assert (src == stack_pointer_rtx
9359 || offset == const0_rtx);
9362 m->fs.sp_offset = ooffset - INTVAL (offset);
9363 m->fs.sp_valid = valid;
9367 /* Find an available register to be used as the dynamic realign argument
9368 pointer register.  Such a register will be written in the prologue and
9369 used at the beginning of the body, so it must not be
9370 1. a parameter passing register;
9371 2. the GOT pointer.
9372 We reuse the static-chain register if it is available.  Otherwise, we
9373 use DI for i386 and R13 for x86-64.  We chose R13 since it has a
9374 longer instruction size.
9376 Return: the regno of the chosen register.  */
9379 find_drap_reg (void)
9381 tree decl = cfun->decl;
9385 /* Use R13 for a nested function or a function that needs a static chain.
9386 Since a function with a tail call may use any caller-saved register
9387 in the epilogue, DRAP must not use a caller-saved register in such a
9388 case.  */
9389 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9396 /* Use DI for a nested function or a function that needs a static chain.
9397 Since a function with a tail call may use any caller-saved register
9398 in the epilogue, DRAP must not use a caller-saved register in such a
9399 case.  */
9400 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9403 /* Reuse the static chain register if it isn't used for parameter passing.  */
9405 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2)
9407 unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (decl));
9408 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) == 0)
9415 /* Return minimum incoming stack alignment. */
9418 ix86_minimum_incoming_stack_boundary (bool sibcall)
9420 unsigned int incoming_stack_boundary;
9422 /* Prefer the one specified at command line. */
9423 if (ix86_user_incoming_stack_boundary)
9424 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
9425 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
9426 if -mstackrealign is used, this is not the sibcall check, and the
9427 estimated stack alignment is 128 bits.  */
9430 && ix86_force_align_arg_pointer
9431 && crtl->stack_alignment_estimated == 128)
9432 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9434 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
9436 /* Incoming stack alignment can be changed on individual functions
9437 via force_align_arg_pointer attribute. We use the smallest
9438 incoming stack boundary. */
9439 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
9440 && lookup_attribute (ix86_force_align_arg_pointer_string,
9441 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
9442 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9444 /* The incoming stack frame has to be aligned at least at
9445 parm_stack_boundary. */
9446 if (incoming_stack_boundary < crtl->parm_stack_boundary)
9447 incoming_stack_boundary = crtl->parm_stack_boundary;
9449 /* The stack at the entry of main is aligned by the runtime.  We use the
9450 smallest incoming stack boundary.  */
9451 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
9452 && DECL_NAME (current_function_decl)
9453 && MAIN_NAME_P (DECL_NAME (current_function_decl))
9454 && DECL_FILE_SCOPE_P (current_function_decl))
9455 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
9457 return incoming_stack_boundary;
9460 /* Update incoming stack boundary and estimated stack alignment. */
9463 ix86_update_stack_boundary (void)
9465 ix86_incoming_stack_boundary
9466 = ix86_minimum_incoming_stack_boundary (false);
9468 /* x86_64 varargs need 16-byte stack alignment for the register save area.  */
9472 && crtl->stack_alignment_estimated < 128)
9473 crtl->stack_alignment_estimated = 128;
9476 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
9477 needed or an rtx for DRAP otherwise. */
9480 ix86_get_drap_rtx (void)
9482 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
9483 crtl->need_drap = true;
9485 if (stack_realign_drap)
9487 /* Assign DRAP to vDRAP and return vDRAP.  */
9488 unsigned int regno = find_drap_reg ();
9493 arg_ptr = gen_rtx_REG (Pmode, regno);
9494 crtl->drap_reg = arg_ptr;
9497 drap_vreg = copy_to_reg (arg_ptr);
9501 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
9504 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
9505 RTX_FRAME_RELATED_P (insn) = 1;
9513 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
9516 ix86_internal_arg_pointer (void)
9518 return virtual_incoming_args_rtx;
9521 struct scratch_reg {
9526 /* Return a short-lived scratch register for use on function entry.
9527 In 32-bit mode, it is valid only after the registers are saved
9528 in the prologue. This register must be released by means of
9529 release_scratch_register_on_entry once it is dead. */
9532 get_scratch_register_on_entry (struct scratch_reg *sr)
9540 /* We always use R11 in 64-bit mode. */
9545 tree decl = current_function_decl, fntype = TREE_TYPE (decl);
9547 = lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
9548 bool static_chain_p = DECL_STATIC_CHAIN (decl);
9549 int regparm = ix86_function_regparm (fntype, decl);
9551 = crtl->drap_reg ? REGNO (crtl->drap_reg) : INVALID_REGNUM;
9553 /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
9554 for the static chain register. */
9555 if ((regparm < 1 || (fastcall_p && !static_chain_p))
9556 && drap_regno != AX_REG)
9558 else if (regparm < 2 && drap_regno != DX_REG)
9560 /* ecx is the static chain register. */
9561 else if (regparm < 3 && !fastcall_p && !static_chain_p
9562 && drap_regno != CX_REG)
9564 else if (ix86_save_reg (BX_REG, true))
9566 /* esi is the static chain register. */
9567 else if (!(regparm == 3 && static_chain_p)
9568 && ix86_save_reg (SI_REG, true))
9570 else if (ix86_save_reg (DI_REG, true))
9574 regno = (drap_regno == AX_REG ? DX_REG : AX_REG);
9579 sr->reg = gen_rtx_REG (Pmode, regno);
9582 rtx insn = emit_insn (gen_push (sr->reg));
9583 RTX_FRAME_RELATED_P (insn) = 1;
9587 /* Release a scratch register obtained from the preceding function. */
9590 release_scratch_register_on_entry (struct scratch_reg *sr)
9594 rtx x, insn = emit_insn (gen_pop (sr->reg));
9596 /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop. */
9597 RTX_FRAME_RELATED_P (insn) = 1;
9598 x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
9599 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
9600 add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
9604 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
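/* With the usual default of STACK_CHECK_PROBE_INTERVAL_EXP == 12 this
   makes PROBE_INTERVAL 4096 bytes, i.e. one probe per page on common
   configurations.  */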
9606 /* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
9609 ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
9611 /* We skip the probe for the first interval + a small dope of 4 words and
9612 probe that many bytes past the specified size to maintain a protection
9613 area at the bottom of the stack.  */
9614 const int dope = 4 * UNITS_PER_WORD;
9615 rtx size_rtx = GEN_INT (size), last;
9617 /* See if we have a constant small number of probes to generate. If so,
9618 that's the easy case. The run-time loop is made up of 11 insns in the
9619 generic case while the compile-time loop is made up of 3+2*(n-1) insns
9620 for n intervals.  */
9621 if (size <= 5 * PROBE_INTERVAL)
9623 HOST_WIDE_INT i, adjust;
9624 bool first_probe = true;
9626 /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
9627 values of N from 1 until it exceeds SIZE. If only one probe is
9628 needed, this will not generate any code. Then adjust and probe
9629 to PROBE_INTERVAL + SIZE. */
9630 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
9634 adjust = 2 * PROBE_INTERVAL + dope;
9635 first_probe = false;
9638 adjust = PROBE_INTERVAL;
9640 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9641 plus_constant (stack_pointer_rtx, -adjust)));
9642 emit_stack_probe (stack_pointer_rtx);
9646 adjust = size + PROBE_INTERVAL + dope;
9648 adjust = size + PROBE_INTERVAL - i;
9650 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9651 plus_constant (stack_pointer_rtx, -adjust)));
9652 emit_stack_probe (stack_pointer_rtx);
9654 /* Adjust back to account for the additional first interval. */
9655 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9656 plus_constant (stack_pointer_rtx,
9657 PROBE_INTERVAL + dope)));
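/* Worked example (assuming PROBE_INTERVAL == 4096 and 64-bit words, so
   dope == 32): for size == 10000 the code above emits, schematically,
   sub $8224 + probe (i == 4096), sub $4096 + probe (i == 8192),
   sub $1808 + probe, then add $4128 to adjust back -- a net decrement
   of exactly 10000 bytes, the first interval plus dope being assumed
   already protected as described above.  */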
9660 /* Otherwise, do the same as above, but in a loop. Note that we must be
9661 extra careful with variables wrapping around because we might be at
9662 the very top (or the very bottom) of the address space and we have
9663 to be able to handle this case properly; in particular, we use an
9664 equality test for the loop condition. */
9667 HOST_WIDE_INT rounded_size;
9668 struct scratch_reg sr;
9670 get_scratch_register_on_entry (&sr);
9673 /* Step 1: round SIZE to the previous multiple of the interval. */
9675 rounded_size = size & -PROBE_INTERVAL;
9678 /* Step 2: compute initial and final value of the loop counter. */
9680 /* SP = SP_0 + PROBE_INTERVAL. */
9681 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9682 plus_constant (stack_pointer_rtx,
9683 - (PROBE_INTERVAL + dope))));
9685 /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
9686 emit_move_insn (sr.reg, GEN_INT (-rounded_size));
9687 emit_insn (gen_rtx_SET (VOIDmode, sr.reg,
9688 gen_rtx_PLUS (Pmode, sr.reg,
9689 stack_pointer_rtx)));
9694 while (SP != LAST_ADDR)
9696 SP = SP + PROBE_INTERVAL
9700 adjusts SP and probes to PROBE_INTERVAL + N * PROBE_INTERVAL for
9701 values of N from 1 until it is equal to ROUNDED_SIZE. */
9703 emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg, size_rtx));
9706 /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
9707 assert at compile-time that SIZE is equal to ROUNDED_SIZE. */
9709 if (size != rounded_size)
9711 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9712 plus_constant (stack_pointer_rtx,
9713 rounded_size - size)));
9714 emit_stack_probe (stack_pointer_rtx);
9717 /* Adjust back to account for the additional first interval. */
9718 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9719 plus_constant (stack_pointer_rtx,
9720 PROBE_INTERVAL + dope)));
9722 release_scratch_register_on_entry (&sr);
9725 gcc_assert (cfun->machine->fs.cfa_reg != stack_pointer_rtx);
9727 /* Even if the stack pointer isn't the CFA register, we need to correctly
9728 describe the adjustments made to it, in particular differentiate the
9729 frame-related ones from the frame-unrelated ones. */
9732 rtx expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
9733 XVECEXP (expr, 0, 0)
9734 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9735 plus_constant (stack_pointer_rtx, -size));
9736 XVECEXP (expr, 0, 1)
9737 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9738 plus_constant (stack_pointer_rtx,
9739 PROBE_INTERVAL + dope + size));
9740 add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
9741 RTX_FRAME_RELATED_P (last) = 1;
9743 cfun->machine->fs.sp_offset += size;
9746 /* Make sure nothing is scheduled before we are done. */
9747 emit_insn (gen_blockage ());
9750 /* Adjust the stack pointer up to REG while probing it. */
9753 output_adjust_stack_and_probe (rtx reg)
9755 static int labelno = 0;
9756 char loop_lab[32], end_lab[32];
9759 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
9760 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
9762 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
9764 /* Jump to END_LAB if SP == LAST_ADDR. */
9765 xops[0] = stack_pointer_rtx;
9767 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
9768 fputs ("\tje\t", asm_out_file);
9769 assemble_name_raw (asm_out_file, end_lab);
9770 fputc ('\n', asm_out_file);
9772 /* SP = SP + PROBE_INTERVAL. */
9773 xops[1] = GEN_INT (PROBE_INTERVAL);
9774 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
9777 xops[1] = const0_rtx;
9778 output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);
9780 fprintf (asm_out_file, "\tjmp\t");
9781 assemble_name_raw (asm_out_file, loop_lab);
9782 fputc ('\n', asm_out_file);
9784 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
9789 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
9790 inclusive. These are offsets from the current stack pointer. */
9793 ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
9795 /* See if we have a constant small number of probes to generate. If so,
9796 that's the easy case. The run-time loop is made up of 7 insns in the
9797 generic case while the compile-time loop is made up of n insns for n intervals.  */
9799 if (size <= 7 * PROBE_INTERVAL)
9803 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
9804 it exceeds SIZE. If only one probe is needed, this will not
9805 generate any code. Then probe at FIRST + SIZE. */
9806 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
9807 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + i)));
9809 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + size)));
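/* Example (assuming PROBE_INTERVAL == 4096): for first == 8192 and
   size == 12288 this emits probes at sp-12288, sp-16384 and finally
   sp-20480 == sp-(first+size).  */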
9812 /* Otherwise, do the same as above, but in a loop. Note that we must be
9813 extra careful with variables wrapping around because we might be at
9814 the very top (or the very bottom) of the address space and we have
9815 to be able to handle this case properly; in particular, we use an
9816 equality test for the loop condition. */
9819 HOST_WIDE_INT rounded_size, last;
9820 struct scratch_reg sr;
9822 get_scratch_register_on_entry (&sr);
9825 /* Step 1: round SIZE to the previous multiple of the interval. */
9827 rounded_size = size & -PROBE_INTERVAL;
9830 /* Step 2: compute initial and final value of the loop counter. */
9832 /* TEST_OFFSET = FIRST. */
9833 emit_move_insn (sr.reg, GEN_INT (-first));
9835 /* LAST_OFFSET = FIRST + ROUNDED_SIZE. */
9836 last = first + rounded_size;
9841 while (TEST_ADDR != LAST_ADDR)
9843 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
9847 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
9848 until it is equal to ROUNDED_SIZE. */
9850 emit_insn (ix86_gen_probe_stack_range (sr.reg, sr.reg, GEN_INT (-last)));
9853 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
9854 that SIZE is equal to ROUNDED_SIZE. */
9856 if (size != rounded_size)
9857 emit_stack_probe (plus_constant (gen_rtx_PLUS (Pmode,
9860 rounded_size - size));
9862 release_scratch_register_on_entry (&sr);
9865 /* Make sure nothing is scheduled before we are done. */
9866 emit_insn (gen_blockage ());
9869 /* Probe a range of stack addresses from REG to END, inclusive. These are
9870 offsets from the current stack pointer. */
9873 output_probe_stack_range (rtx reg, rtx end)
9875 static int labelno = 0;
9876 char loop_lab[32], end_lab[32];
9879 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
9880 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
9882 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
9884 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
9887 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
9888 fputs ("\tje\t", asm_out_file);
9889 assemble_name_raw (asm_out_file, end_lab);
9890 fputc ('\n', asm_out_file);
9892 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
9893 xops[1] = GEN_INT (PROBE_INTERVAL);
9894 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
9896 /* Probe at TEST_ADDR. */
9897 xops[0] = stack_pointer_rtx;
9899 xops[2] = const0_rtx;
9900 output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops);
9902 fprintf (asm_out_file, "\tjmp\t");
9903 assemble_name_raw (asm_out_file, loop_lab);
9904 fputc ('\n', asm_out_file);
9906 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
9911 /* Finalize the stack_realign_needed flag, which guides the generation of
9912 the prologue/epilogue in the correct form.  */
9914 ix86_finalize_stack_realign_flags (void)
9916 /* Check if stack realignment is really needed after reload, and
9917 store the result in cfun.  */
9918 unsigned int incoming_stack_boundary
9919 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
9920 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
9921 unsigned int stack_realign = (incoming_stack_boundary
9922 < (current_function_is_leaf
9923 ? crtl->max_used_stack_slot_alignment
9924 : crtl->stack_alignment_needed));
9926 if (crtl->stack_realign_finalized)
9928 /* After stack_realign_needed is finalized, we can no longer update it.  */
9930 gcc_assert (crtl->stack_realign_needed == stack_realign);
9934 /* If the only reason for frame_pointer_needed is that we conservatively
9935 assumed stack realignment might be needed, but in the end nothing that
9936 needed the stack alignment had been spilled, clear frame_pointer_needed
9937 and say we don't need stack realignment. */
9940 && frame_pointer_needed
9941 && current_function_is_leaf
9942 && flag_omit_frame_pointer
9943 && current_function_sp_is_unchanging
9944 && !ix86_current_function_calls_tls_descriptor
9945 && !crtl->accesses_prior_frames
9946 && !cfun->calls_alloca
9947 && !crtl->calls_eh_return
9948 && !(flag_stack_check && STACK_CHECK_MOVING_SP)
9949 && !ix86_frame_pointer_required ()
9950 && get_frame_size () == 0
9951 && ix86_nsaved_sseregs () == 0
9952 && ix86_varargs_gpr_size + ix86_varargs_fpr_size == 0)
9954 HARD_REG_SET set_up_by_prologue, prologue_used;
9957 CLEAR_HARD_REG_SET (prologue_used);
9958 CLEAR_HARD_REG_SET (set_up_by_prologue);
9959 add_to_hard_reg_set (&set_up_by_prologue, Pmode, STACK_POINTER_REGNUM);
9960 add_to_hard_reg_set (&set_up_by_prologue, Pmode, ARG_POINTER_REGNUM);
9961 add_to_hard_reg_set (&set_up_by_prologue, Pmode,
9962 HARD_FRAME_POINTER_REGNUM);
9966 FOR_BB_INSNS (bb, insn)
9967 if (NONDEBUG_INSN_P (insn)
9968 && requires_stack_frame_p (insn, prologue_used,
9969 set_up_by_prologue))
9971 crtl->stack_realign_needed = stack_realign;
9972 crtl->stack_realign_finalized = true;
9977 frame_pointer_needed = false;
9978 stack_realign = false;
9979 crtl->max_used_stack_slot_alignment = incoming_stack_boundary;
9980 crtl->stack_alignment_needed = incoming_stack_boundary;
9981 crtl->stack_alignment_estimated = incoming_stack_boundary;
9982 if (crtl->preferred_stack_boundary > incoming_stack_boundary)
9983 crtl->preferred_stack_boundary = incoming_stack_boundary;
9984 df_finish_pass (true);
9985 df_scan_alloc (NULL);
9987 df_compute_regs_ever_live (true);
9991 crtl->stack_realign_needed = stack_realign;
9992 crtl->stack_realign_finalized = true;
9995 /* Expand the prologue into a bunch of separate insns. */
9998 ix86_expand_prologue (void)
10000 struct machine_function *m = cfun->machine;
10003 struct ix86_frame frame;
10004 HOST_WIDE_INT allocate;
10005 bool int_registers_saved;
10007 ix86_finalize_stack_realign_flags ();
10009 /* DRAP should not coexist with stack_realign_fp.  */
10010 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
10012 memset (&m->fs, 0, sizeof (m->fs));
10014 /* Initialize CFA state for before the prologue. */
10015 m->fs.cfa_reg = stack_pointer_rtx;
10016 m->fs.cfa_offset = INCOMING_FRAME_SP_OFFSET;
10018 /* Track SP offset to the CFA. We continue tracking this after we've
10019 swapped the CFA register away from SP. In the case of re-alignment
10020 this is fudged; we're interested in offsets within the local frame.  */
10021 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
10022 m->fs.sp_valid = true;
10024 ix86_compute_frame_layout (&frame);
10026 if (!TARGET_64BIT && ix86_function_ms_hook_prologue (current_function_decl))
10028 /* We should have already generated an error for any use of
10029 ms_hook on a nested function. */
10030 gcc_checking_assert (!ix86_static_chain_on_stack);
10032 /* Check if profiling is active and we shall use the profiling-before-
10033 prologue variant.  If so, issue a sorry.  */
10034 if (crtl->profile && flag_fentry != 0)
10035 sorry ("ms_hook_prologue attribute isn%'t compatible "
10036 "with -mfentry for 32-bit");
10038 /* In ix86_asm_output_function_label we emitted:
10039 8b ff     movl.s %edi,%edi
10040 55        push   %ebp
10041 8b ec     movl.s %esp,%ebp
10043 This matches the hookable function prologue in Win32 API
10044 functions in Microsoft Windows XP Service Pack 2 and newer.
10045 Wine uses this to enable Windows apps to hook the Win32 API
10046 functions provided by Wine.
10048 What that means is that we've already set up the frame pointer. */
10050 if (frame_pointer_needed
10051 && !(crtl->drap_reg && crtl->stack_realign_needed))
10055 /* We've decided to use the frame pointer already set up.
10056 Describe this to the unwinder by pretending that both
10057 push and mov insns happen right here.
10059 Putting the unwind info here at the end of the ms_hook
10060 is done so that we can make absolutely certain we get
10061 the required byte sequence at the start of the function,
10062 rather than relying on an assembler that can produce
10063 the exact encoding required.
10065 However it does mean (in the unpatched case) that we have
10066 a 1 insn window where the asynchronous unwind info is
10067 incorrect. However, if we placed the unwind info at
10068 its correct location we would have incorrect unwind info
10069 in the patched case. Which is probably all moot since
10070 I don't expect Wine generates dwarf2 unwind info for the
10071 system libraries that use this feature. */
10073 insn = emit_insn (gen_blockage ());
10075 push = gen_push (hard_frame_pointer_rtx);
10076 mov = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
10077 stack_pointer_rtx);
10078 RTX_FRAME_RELATED_P (push) = 1;
10079 RTX_FRAME_RELATED_P (mov) = 1;
10081 RTX_FRAME_RELATED_P (insn) = 1;
10082 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10083 gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, push, mov)));
10085 /* Note that gen_push incremented m->fs.cfa_offset, even
10086 though we didn't emit the push insn here. */
10087 m->fs.cfa_reg = hard_frame_pointer_rtx;
10088 m->fs.fp_offset = m->fs.cfa_offset;
10089 m->fs.fp_valid = true;
10093 /* The frame pointer is not needed so pop %ebp again.
10094 This leaves us with a pristine state. */
10095 emit_insn (gen_pop (hard_frame_pointer_rtx));
10099 /* The first insn of a function that accepts its static chain on the
10100 stack is to push the register that would be filled in by a direct
10101 call. This insn will be skipped by the trampoline. */
10102 else if (ix86_static_chain_on_stack)
10104 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
10105 emit_insn (gen_blockage ());
10107 /* We don't want to interpret this push insn as a register save,
10108 only as a stack adjustment. The real copy of the register as
10109 a save will be done later, if needed. */
10110 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
10111 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
10112 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
10113 RTX_FRAME_RELATED_P (insn) = 1;
10116 /* Emit prologue code to adjust the stack alignment and set up DRAP,
10117 in case DRAP is needed and stack realignment is really needed after reload.  */
10118 if (stack_realign_drap)
10120 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
10122 /* Only need to push parameter pointer reg if it is caller saved. */
10123 if (!call_used_regs[REGNO (crtl->drap_reg)])
10125 /* Push the arg pointer reg.  */
10126 insn = emit_insn (gen_push (crtl->drap_reg));
10127 RTX_FRAME_RELATED_P (insn) = 1;
10130 /* Grab the argument pointer. */
10131 t = plus_constant (stack_pointer_rtx, m->fs.sp_offset);
10132 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
10133 RTX_FRAME_RELATED_P (insn) = 1;
10134 m->fs.cfa_reg = crtl->drap_reg;
10135 m->fs.cfa_offset = 0;
10137 /* Align the stack. */
10138 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
10140 GEN_INT (-align_bytes)));
10141 RTX_FRAME_RELATED_P (insn) = 1;
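/* E.g. with align_bytes == 32 this emits and{l,q} $-32 on the stack
   pointer; -32 == ~31, so the low five bits are cleared and the stack
   drops to the next 32-byte boundary.  */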
10143 /* Replicate the return address on the stack so that the return
10144 address can be reached via the (argp - 1) slot.  This is needed
10145 to implement the RETURN_ADDR_RTX macro and the intrinsic function
10146 expand_builtin_return_addr, etc.  */
10147 t = plus_constant (crtl->drap_reg, -UNITS_PER_WORD);
10148 t = gen_frame_mem (Pmode, t);
10149 insn = emit_insn (gen_push (t));
10150 RTX_FRAME_RELATED_P (insn) = 1;
10152 /* For the purposes of frame and register save area addressing,
10153 we've started over with a new frame. */
10154 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
10155 m->fs.realigned = true;
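
  /* An illustrative sketch of what the DRAP setup above typically
     expands to on 32-bit with a 16-byte alignment requirement:

         leal  4(%esp), %ecx     # grab the argument pointer
         andl  $-16, %esp        # align the stack
         pushl -4(%ecx)          # replicate the return address

     The DRAP register (%ecx here) and the alignment constant depend
     on the function being compiled.  */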
  if (frame_pointer_needed && !m->fs.fp_valid)
    {
      /* Note: AT&T enter does NOT have reversed args.  Enter is probably
         slower on all targets.  Also sdb doesn't like it.  */
      insn = emit_insn (gen_push (hard_frame_pointer_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (m->fs.sp_offset == frame.hard_frame_pointer_offset)
        {
          insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;

          if (m->fs.cfa_reg == stack_pointer_rtx)
            m->fs.cfa_reg = hard_frame_pointer_rtx;
          m->fs.fp_offset = m->fs.sp_offset;
          m->fs.fp_valid = true;
        }
    }

  int_registers_saved = (frame.nregs == 0);

  if (!int_registers_saved)
    {
      /* If saving registers via PUSH, do so now.  */
      if (!frame.save_regs_using_mov)
        {
          ix86_emit_save_regs ();
          int_registers_saved = true;
          gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
        }

      /* When using the red zone we may start register saving before
         allocating the stack frame, saving one cycle of the prologue.
         However, avoid doing this if we have to probe the stack; at least
         on x86_64 the stack probe can turn into a call that clobbers a
         red zone location.  */
      else if (ix86_using_red_zone ()
               && (! TARGET_STACK_PROBE
                   || frame.stack_pointer_offset < CHECK_STACK_LIMIT))
        {
          ix86_emit_save_regs_using_mov (frame.reg_save_offset);
          int_registers_saved = true;
        }
    }

  if (stack_realign_fp)
    {
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
      gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);

      /* The computation of the size of the re-aligned stack frame means
         that we must allocate the size of the register save area before
         performing the actual alignment.  Otherwise we cannot guarantee
         that there's enough storage above the realignment point.  */
      if (m->fs.sp_offset != frame.sse_reg_save_offset)
        pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                   GEN_INT (m->fs.sp_offset
                                            - frame.sse_reg_save_offset),
                                   -1, false);

      /* Align the stack.  */
      insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
                                        stack_pointer_rtx,
                                        GEN_INT (-align_bytes)));

      /* For the purposes of register save area addressing, the stack
         pointer is no longer valid.  As for the value of sp_offset,
         see ix86_compute_frame_layout, which we need to match in order
         to pass verification of stack_pointer_offset at the end.  */
      m->fs.sp_offset = (m->fs.sp_offset + align_bytes) & -align_bytes;
      m->fs.sp_valid = false;
    }
  allocate = frame.stack_pointer_offset - m->fs.sp_offset;

  if (flag_stack_usage_info)
    {
      /* We start to count from ARG_POINTER.  */
      HOST_WIDE_INT stack_size = frame.stack_pointer_offset;

      /* If it was realigned, take into account the fake frame.  */
      if (stack_realign_drap)
        {
          if (ix86_static_chain_on_stack)
            stack_size += UNITS_PER_WORD;

          if (!call_used_regs[REGNO (crtl->drap_reg)])
            stack_size += UNITS_PER_WORD;

          /* This over-estimates by 1 minimal-stack-alignment-unit but
             mitigates that by counting in the new return address slot.  */
          current_function_dynamic_stack_size
            += crtl->stack_alignment_needed / BITS_PER_UNIT;
        }

      current_function_static_stack_size = stack_size;
    }

  /* The stack has already been decremented by the instruction calling us
     so probe if the size is non-negative to preserve the protection area.  */
  if (allocate >= 0 && flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
    {
      /* We expect the registers to be saved when probes are used.  */
      gcc_assert (int_registers_saved);

      if (STACK_CHECK_MOVING_SP)
        {
          ix86_adjust_stack_and_probe (allocate);
          allocate = 0;
        }
      else
        {
          HOST_WIDE_INT size = allocate;

          if (TARGET_64BIT && size >= (HOST_WIDE_INT) 0x80000000)
            size = 0x80000000 - STACK_CHECK_PROTECT - 1;

          if (TARGET_STACK_PROBE)
            ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
          else
            ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
        }
    }

  if (allocate == 0)
    ;
  else if (!ix86_target_stack_probe ()
           || frame.stack_pointer_offset < CHECK_STACK_LIMIT)
    {
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                 GEN_INT (-allocate), -1,
                                 m->fs.cfa_reg == stack_pointer_rtx);
    }
  else
    {
      rtx eax = gen_rtx_REG (Pmode, AX_REG);
      rtx r10 = NULL;
      rtx (*adjust_stack_insn)(rtx, rtx, rtx);

      bool eax_live = false;
      bool r10_live = false;

      if (TARGET_64BIT)
        r10_live = (DECL_STATIC_CHAIN (current_function_decl) != 0);
      if (!TARGET_64BIT_MS_ABI)
        eax_live = ix86_eax_live_at_start_p ();

      if (eax_live)
        {
          emit_insn (gen_push (eax));
          allocate -= UNITS_PER_WORD;
        }
      if (r10_live)
        {
          r10 = gen_rtx_REG (Pmode, R10_REG);
          emit_insn (gen_push (r10));
          allocate -= UNITS_PER_WORD;
        }

      emit_move_insn (eax, GEN_INT (allocate));
      emit_insn (ix86_gen_allocate_stack_worker (eax, eax));

      /* Use the fact that AX still contains ALLOCATE.  */
      adjust_stack_insn = (TARGET_64BIT
                           ? gen_pro_epilogue_adjust_stack_di_sub
                           : gen_pro_epilogue_adjust_stack_si_sub);

      insn = emit_insn (adjust_stack_insn (stack_pointer_rtx,
                                           stack_pointer_rtx, eax));

      /* Note that SEH directives need to continue tracking the stack
         pointer even after the frame pointer has been set up.  */
      if (m->fs.cfa_reg == stack_pointer_rtx || TARGET_SEH)
        {
          if (m->fs.cfa_reg == stack_pointer_rtx)
            m->fs.cfa_offset += allocate;

          RTX_FRAME_RELATED_P (insn) = 1;
          add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                        gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                     plus_constant (stack_pointer_rtx,
                                                    -allocate)));
        }
      m->fs.sp_offset += allocate;

      if (r10_live && eax_live)
        {
          t = choose_baseaddr (m->fs.sp_offset - allocate);
          emit_move_insn (r10, gen_frame_mem (Pmode, t));
          t = choose_baseaddr (m->fs.sp_offset - allocate - UNITS_PER_WORD);
          emit_move_insn (eax, gen_frame_mem (Pmode, t));
        }
      else if (eax_live || r10_live)
        {
          t = choose_baseaddr (m->fs.sp_offset - allocate);
          emit_move_insn ((eax_live ? eax : r10), gen_frame_mem (Pmode, t));
        }
    }
  gcc_assert (m->fs.sp_offset == frame.stack_pointer_offset);
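
  /* A rough sketch of the large-allocation path above, for 64-bit:

         movq  $allocate, %rax
         call  <stack allocation worker>   # e.g. ___chkstk_ms
         subq  %rax, %rsp

     with %rax (and %r10) pushed and reloaded around the call when they
     are live on entry.  The worker symbol is target-dependent.  */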
  /* If we haven't already set up the frame pointer, do so now.  */
  if (frame_pointer_needed && !m->fs.fp_valid)
    {
      insn = ix86_gen_add3 (hard_frame_pointer_rtx, stack_pointer_rtx,
                            GEN_INT (frame.stack_pointer_offset
                                     - frame.hard_frame_pointer_offset));
      insn = emit_insn (insn);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);

      if (m->fs.cfa_reg == stack_pointer_rtx)
        m->fs.cfa_reg = hard_frame_pointer_rtx;
      m->fs.fp_offset = frame.hard_frame_pointer_offset;
      m->fs.fp_valid = true;
    }

  if (!int_registers_saved)
    ix86_emit_save_regs_using_mov (frame.reg_save_offset);
  if (frame.nsseregs)
    ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);

  pic_reg_used = false;
  if (pic_offset_table_rtx
      && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
          || crtl->profile))
    {
      unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();

      if (alt_pic_reg_used != INVALID_REGNUM)
        SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);

      pic_reg_used = true;
    }

  if (pic_reg_used)
    {
      if (TARGET_64BIT)
        {
          if (ix86_cmodel == CM_LARGE_PIC)
            {
              rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
              rtx label = gen_label_rtx ();
              emit_label (label);
              LABEL_PRESERVE_P (label) = 1;
              gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
              insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
              insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
              insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
                                            pic_offset_table_rtx, tmp_reg));
            }
          else
            insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
        }
      else
        {
          insn = emit_insn (gen_set_got (pic_offset_table_rtx));
          RTX_FRAME_RELATED_P (insn) = 1;
          add_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL_RTX);
        }
    }

  /* In the pic_reg_used case, make sure that the got load isn't deleted
     when mcount needs it.  Blockage to avoid call movement across mcount
     call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
     note.  */
  if (crtl->profile && !flag_fentry && pic_reg_used)
    emit_insn (gen_prologue_use (pic_offset_table_rtx));

  if (crtl->drap_reg && !crtl->stack_realign_needed)
    {
      /* vDRAP is set up, but after reload it turns out stack realignment
         isn't necessary; emit the prologue to set up DRAP without the
         stack realignment adjustment.  */
      t = choose_baseaddr (0);
      emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
    }

  /* Prevent instructions from being scheduled into the register save push
     sequence when access to the red-zone area is done through the frame
     pointer.  The offset between the frame pointer and the stack pointer
     is calculated relative to the value of the stack pointer at the end
     of the function prologue, and moving instructions that access the
     red-zone area via the frame pointer inside the push sequence violates
     this assumption.  */
  if (frame_pointer_needed && frame.red_zone_size)
    emit_insn (gen_memory_blockage ());

  /* Emit cld instruction if stringops are used in the function.  */
  if (TARGET_CLD && ix86_current_function_needs_cld)
    emit_insn (gen_cld ());

  /* SEH requires that the prologue end within 256 bytes of the start of
     the function.  Prevent instruction schedules that would extend that.
     Further, prevent alloca modifications to the stack pointer from being
     combined with prologue modifications.  */
  if (TARGET_SEH)
    emit_insn (gen_prologue_use (stack_pointer_rtx));
}
/* Emit code to restore REG using a POP insn.  */

static void
ix86_emit_restore_reg_using_pop (rtx reg)
{
  struct machine_function *m = cfun->machine;
  rtx insn = emit_insn (gen_pop (reg));

  ix86_add_cfa_restore_note (insn, reg, m->fs.sp_offset);
  m->fs.sp_offset -= UNITS_PER_WORD;

  if (m->fs.cfa_reg == crtl->drap_reg
      && REGNO (reg) == REGNO (crtl->drap_reg))
    {
      /* Previously we'd represented the CFA as an expression
         like *(%ebp - 8).  We've just popped that value from
         the stack, which means we need to reset the CFA to
         the drap register.  This will remain until we restore
         the stack pointer.  */
      add_reg_note (insn, REG_CFA_DEF_CFA, reg);
      RTX_FRAME_RELATED_P (insn) = 1;

      /* This means that the DRAP register is valid for addressing too.  */
      m->fs.drap_valid = true;
      return;
    }

  if (m->fs.cfa_reg == stack_pointer_rtx)
    {
      rtx x = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
      x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
      RTX_FRAME_RELATED_P (insn) = 1;

      m->fs.cfa_offset -= UNITS_PER_WORD;
    }

  /* When the frame pointer is the CFA, and we pop it, we are
     swapping back to the stack pointer as the CFA.  This happens
     for stack frames that don't allocate other data, so we assume
     the stack pointer is now pointing at the return address, i.e.
     the function entry state, which makes the offset be 1 word.  */
  if (reg == hard_frame_pointer_rtx)
    {
      m->fs.fp_valid = false;
      if (m->fs.cfa_reg == hard_frame_pointer_rtx)
        {
          m->fs.cfa_reg = stack_pointer_rtx;
          m->fs.cfa_offset -= UNITS_PER_WORD;

          add_reg_note (insn, REG_CFA_DEF_CFA,
                        gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                      GEN_INT (m->fs.cfa_offset)));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }
}
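
/* To illustrate the bookkeeping above: popping %ebp while it is the
   CFA register emits

       popl %ebp

   together with a REG_CFA_DEF_CFA note that redefines the CFA as
   %esp + cfa_offset, i.e. back at the function-entry state.  */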
/* Emit code to restore saved registers using POP insns.  */

static void
ix86_emit_restore_regs_using_pop (void)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
      ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno));
}

/* Emit code and notes for the LEAVE instruction.  */

static void
ix86_emit_leave (void)
{
  struct machine_function *m = cfun->machine;
  rtx insn = emit_insn (ix86_gen_leave ());

  ix86_add_queued_cfa_restore_notes (insn);

  gcc_assert (m->fs.fp_valid);
  m->fs.sp_valid = true;
  m->fs.sp_offset = m->fs.fp_offset - UNITS_PER_WORD;
  m->fs.fp_valid = false;

  if (m->fs.cfa_reg == hard_frame_pointer_rtx)
    {
      m->fs.cfa_reg = stack_pointer_rtx;
      m->fs.cfa_offset = m->fs.sp_offset;

      add_reg_note (insn, REG_CFA_DEF_CFA,
                    plus_constant (stack_pointer_rtx, m->fs.sp_offset));
      RTX_FRAME_RELATED_P (insn) = 1;
      ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
                                 m->fs.fp_offset);
    }
}
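
/* LEAVE is architecturally equivalent to

       movl %ebp, %esp
       popl %ebp

   which is why the code above revalidates the stack pointer at
   fp_offset - UNITS_PER_WORD and invalidates the frame pointer.  */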
/* Emit code to restore saved registers using MOV insns.
   First register is restored from CFA - CFA_OFFSET.  */
static void
ix86_emit_restore_regs_using_mov (HOST_WIDE_INT cfa_offset,
                                  bool maybe_eh_return)
{
  struct machine_function *m = cfun->machine;
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
      {
        rtx reg = gen_rtx_REG (Pmode, regno);
        rtx insn, mem;

        mem = choose_baseaddr (cfa_offset);
        mem = gen_frame_mem (Pmode, mem);
        insn = emit_move_insn (reg, mem);

        if (m->fs.cfa_reg == crtl->drap_reg && regno == REGNO (crtl->drap_reg))
          {
            /* Previously we'd represented the CFA as an expression
               like *(%ebp - 8).  We've just popped that value from
               the stack, which means we need to reset the CFA to
               the drap register.  This will remain until we restore
               the stack pointer.  */
            add_reg_note (insn, REG_CFA_DEF_CFA, reg);
            RTX_FRAME_RELATED_P (insn) = 1;

            /* This means that the DRAP register is valid for addressing.  */
            m->fs.drap_valid = true;
          }
        else
          ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);

        cfa_offset -= UNITS_PER_WORD;
      }
}

/* Emit code to restore saved SSE registers using MOV insns.
   First register is restored from CFA - CFA_OFFSET.  */
static void
ix86_emit_restore_sse_regs_using_mov (HOST_WIDE_INT cfa_offset,
                                      bool maybe_eh_return)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
      {
        rtx reg = gen_rtx_REG (V4SFmode, regno);
        rtx mem;

        mem = choose_baseaddr (cfa_offset);
        mem = gen_rtx_MEM (V4SFmode, mem);
        set_mem_align (mem, 128);
        emit_move_insn (reg, mem);

        ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);

        cfa_offset -= 16;
      }
}
/* Restore function stack, frame, and registers.  */

void
ix86_expand_epilogue (int style)
{
  struct machine_function *m = cfun->machine;
  struct machine_frame_state frame_state_save = m->fs;
  struct ix86_frame frame;
  bool restore_regs_via_mov;
  bool using_drap;

  ix86_finalize_stack_realign_flags ();
  ix86_compute_frame_layout (&frame);

  m->fs.sp_valid = (!frame_pointer_needed
                    || (current_function_sp_is_unchanging
                        && !stack_realign_fp));
  gcc_assert (!m->fs.sp_valid
              || m->fs.sp_offset == frame.stack_pointer_offset);

  /* The FP must be valid if the frame pointer is present.  */
  gcc_assert (frame_pointer_needed == m->fs.fp_valid);
  gcc_assert (!m->fs.fp_valid
              || m->fs.fp_offset == frame.hard_frame_pointer_offset);

  /* We must have *some* valid pointer to the stack frame.  */
  gcc_assert (m->fs.sp_valid || m->fs.fp_valid);

  /* The DRAP is never valid at this point.  */
  gcc_assert (!m->fs.drap_valid);

  /* See the comment about red zone and frame
     pointer usage in ix86_expand_prologue.  */
  if (frame_pointer_needed && frame.red_zone_size)
    emit_insn (gen_memory_blockage ());

  using_drap = crtl->drap_reg && crtl->stack_realign_needed;
  gcc_assert (!using_drap || m->fs.cfa_reg == crtl->drap_reg);

  /* Determine the CFA offset of the end of the red-zone.  */
  m->fs.red_zone_offset = 0;
  if (ix86_using_red_zone () && crtl->args.pops_args < 65536)
    {
      /* The red-zone begins below the return address.  */
      m->fs.red_zone_offset = RED_ZONE_SIZE + UNITS_PER_WORD;

      /* When the register save area is in the aligned portion of
         the stack, determine the maximum runtime displacement that
         matches up with the aligned frame.  */
      if (stack_realign_drap)
        m->fs.red_zone_offset -= (crtl->stack_alignment_needed / BITS_PER_UNIT
                                  + UNITS_PER_WORD);
    }

  /* Special care must be taken for the normal return case of a function
     using eh_return: the eax and edx registers are marked as saved, but
     not restored along this path.  Adjust the save location to match.  */
  if (crtl->calls_eh_return && style != 2)
    frame.reg_save_offset -= 2 * UNITS_PER_WORD;

  /* EH_RETURN requires the use of moves to function properly.  */
  if (crtl->calls_eh_return)
    restore_regs_via_mov = true;
  /* SEH requires the use of pops to identify the epilogue.  */
  else if (TARGET_SEH)
    restore_regs_via_mov = false;
  /* If we're only restoring one register and sp is not valid, then
     use a move instruction to restore the register, since it's less
     work than reloading sp and popping the register.  */
  else if (!m->fs.sp_valid && frame.nregs <= 1)
    restore_regs_via_mov = true;
  else if (TARGET_EPILOGUE_USING_MOVE
           && cfun->machine->use_fast_prologue_epilogue
           && (frame.nregs > 1
               || m->fs.sp_offset != frame.reg_save_offset))
    restore_regs_via_mov = true;
  else if (frame_pointer_needed
           && !frame.nregs
           && m->fs.sp_offset != frame.reg_save_offset)
    restore_regs_via_mov = true;
  else if (frame_pointer_needed
           && TARGET_USE_LEAVE
           && cfun->machine->use_fast_prologue_epilogue
           && frame.nregs == 1)
    restore_regs_via_mov = true;
  else
    restore_regs_via_mov = false;
  if (restore_regs_via_mov || frame.nsseregs)
    {
      /* Ensure that the entire register save area is addressable via
         the stack pointer, if we will restore via sp.  */
      if (TARGET_64BIT
          && m->fs.sp_offset > 0x7fffffff
          && !(m->fs.fp_valid || m->fs.drap_valid)
          && (frame.nsseregs + frame.nregs) != 0)
        {
          pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                     GEN_INT (m->fs.sp_offset
                                              - frame.sse_reg_save_offset),
                                     style,
                                     m->fs.cfa_reg == stack_pointer_rtx);
        }
    }

  /* If there are any SSE registers to restore, then we have to do it
     via moves, since there's obviously no pop for SSE regs.  */
  if (frame.nsseregs)
    ix86_emit_restore_sse_regs_using_mov (frame.sse_reg_save_offset,
                                          style == 2);

  if (restore_regs_via_mov)
    {
      rtx t;

      if (frame.nregs)
        ix86_emit_restore_regs_using_mov (frame.reg_save_offset, style == 2);

      /* eh_return epilogues need %ecx added to the stack pointer.  */
      if (style == 2)
        {
          rtx insn, sa = EH_RETURN_STACKADJ_RTX;

          /* Stack realignment doesn't work with eh_return.  */
          gcc_assert (!stack_realign_drap);
          /* Neither do regparm nested functions.  */
          gcc_assert (!ix86_static_chain_on_stack);

          if (frame_pointer_needed)
            {
              t = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
              t = plus_constant (t, m->fs.fp_offset - UNITS_PER_WORD);
              emit_insn (gen_rtx_SET (VOIDmode, sa, t));

              t = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
              insn = emit_move_insn (hard_frame_pointer_rtx, t);

              /* Note that we use SA as a temporary CFA, as the return
                 address is at the proper place relative to it.  We
                 pretend this happens at the FP restore insn because
                 prior to this insn the FP would be stored at the wrong
                 offset relative to SA, and after this insn we have no
                 other reasonable register to use for the CFA.  We don't
                 bother resetting the CFA to the SP for the duration of
                 the return insn.  */
              add_reg_note (insn, REG_CFA_DEF_CFA,
                            plus_constant (sa, UNITS_PER_WORD));
              ix86_add_queued_cfa_restore_notes (insn);
              add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
              RTX_FRAME_RELATED_P (insn) = 1;

              m->fs.cfa_reg = sa;
              m->fs.cfa_offset = UNITS_PER_WORD;
              m->fs.fp_valid = false;

              pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
                                         const0_rtx, style, false);
            }
          else
            {
              t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
              t = plus_constant (t, m->fs.sp_offset - UNITS_PER_WORD);
              insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, t));
              ix86_add_queued_cfa_restore_notes (insn);

              gcc_assert (m->fs.cfa_reg == stack_pointer_rtx);
              if (m->fs.cfa_offset != UNITS_PER_WORD)
                {
                  m->fs.cfa_offset = UNITS_PER_WORD;
                  add_reg_note (insn, REG_CFA_DEF_CFA,
                                plus_constant (stack_pointer_rtx,
                                               UNITS_PER_WORD));
                  RTX_FRAME_RELATED_P (insn) = 1;
                }
            }
          m->fs.sp_offset = UNITS_PER_WORD;
          m->fs.sp_valid = true;
        }
    }
  else
    {
      /* SEH requires that the function end with (1) a stack adjustment
         if necessary, (2) a sequence of pops, and (3) a return or
         jump instruction.  Prevent insns from the function body from
         being scheduled into this sequence.  */
      if (TARGET_SEH)
        {
          /* Prevent a catch region from being adjacent to the standard
             epilogue sequence.  Unfortunately, neither crtl->uses_eh_lsda
             nor several other flags that would be interesting to test
             are set up yet.  */
          if (flag_non_call_exceptions)
            emit_insn (gen_nops (const1_rtx));
          else
            emit_insn (gen_blockage ());
        }

      /* First step is to deallocate the stack frame so that we can
         pop the registers.  */
      if (!m->fs.sp_valid)
        {
          pro_epilogue_adjust_stack (stack_pointer_rtx, hard_frame_pointer_rtx,
                                     GEN_INT (m->fs.fp_offset
                                              - frame.reg_save_offset),
                                     style, false);
        }
      else if (m->fs.sp_offset != frame.reg_save_offset)
        {
          pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                     GEN_INT (m->fs.sp_offset
                                              - frame.reg_save_offset),
                                     style,
                                     m->fs.cfa_reg == stack_pointer_rtx);
        }

      ix86_emit_restore_regs_using_pop ();
    }
  /* If we used a frame pointer and haven't already got rid of it,
     then do so now.  */
  if (m->fs.fp_valid)
    {
      /* If the stack pointer is valid and pointing at the frame
         pointer store address, then we only need a pop.  */
      if (m->fs.sp_valid && m->fs.sp_offset == frame.hfp_save_offset)
        ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
      /* Leave results in shorter dependency chains on CPUs that are
         able to grok it fast.  */
      else if (TARGET_USE_LEAVE
               || optimize_function_for_size_p (cfun)
               || !cfun->machine->use_fast_prologue_epilogue)
        ix86_emit_leave ();
      else
        {
          pro_epilogue_adjust_stack (stack_pointer_rtx,
                                     hard_frame_pointer_rtx,
                                     const0_rtx, style, !using_drap);
          ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
        }
    }

  if (using_drap)
    {
      int param_ptr_offset = UNITS_PER_WORD;
      rtx insn;

      gcc_assert (stack_realign_drap);

      if (ix86_static_chain_on_stack)
        param_ptr_offset += UNITS_PER_WORD;
      if (!call_used_regs[REGNO (crtl->drap_reg)])
        param_ptr_offset += UNITS_PER_WORD;

      insn = emit_insn (gen_rtx_SET
                        (VOIDmode, stack_pointer_rtx,
                         gen_rtx_PLUS (Pmode,
                                       crtl->drap_reg,
                                       GEN_INT (-param_ptr_offset))));
      m->fs.cfa_reg = stack_pointer_rtx;
      m->fs.cfa_offset = param_ptr_offset;
      m->fs.sp_offset = param_ptr_offset;
      m->fs.realigned = false;

      add_reg_note (insn, REG_CFA_DEF_CFA,
                    gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                  GEN_INT (param_ptr_offset)));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (!call_used_regs[REGNO (crtl->drap_reg)])
        ix86_emit_restore_reg_using_pop (crtl->drap_reg);
    }

  /* At this point the stack pointer must be valid, and we must have
     restored all of the registers.  We may not have deallocated the
     entire stack frame.  We've delayed this until now because it may
     be possible to merge the local stack deallocation with the
     deallocation forced by ix86_static_chain_on_stack.  */
  gcc_assert (m->fs.sp_valid);
  gcc_assert (!m->fs.fp_valid);
  gcc_assert (!m->fs.realigned);
  if (m->fs.sp_offset != UNITS_PER_WORD)
    {
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                 GEN_INT (m->fs.sp_offset - UNITS_PER_WORD),
                                 style, true);
    }

  ix86_add_queued_cfa_restore_notes (get_last_insn ());

  /* Sibcall epilogues don't want a return instruction.  */
  if (style == 0)
    {
      m->fs = frame_state_save;
      return;
    }

  /* Emit vzeroupper if needed.  */
  if (TARGET_VZEROUPPER
      && !TREE_THIS_VOLATILE (cfun->decl)
      && !cfun->machine->caller_return_avx256_p)
    emit_insn (gen_avx_vzeroupper (GEN_INT (call_no_avx256)));

  if (crtl->args.pops_args && crtl->args.size)
    {
      rtx popc = GEN_INT (crtl->args.pops_args);

      /* i386 can only pop 64K bytes.  If asked to pop more, pop the
         return address, do an explicit add, and jump indirectly to the
         caller.  */
      if (crtl->args.pops_args >= 65536)
        {
          rtx ecx = gen_rtx_REG (SImode, CX_REG);
          rtx insn;

          /* There is no "pascal" calling convention in any 64bit ABI.  */
          gcc_assert (!TARGET_64BIT);

          insn = emit_insn (gen_pop (ecx));
          m->fs.cfa_offset -= UNITS_PER_WORD;
          m->fs.sp_offset -= UNITS_PER_WORD;

          add_reg_note (insn, REG_CFA_ADJUST_CFA,
                        copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
          add_reg_note (insn, REG_CFA_REGISTER,
                        gen_rtx_SET (VOIDmode, ecx, pc_rtx));
          RTX_FRAME_RELATED_P (insn) = 1;

          pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                     popc, -1, true);
          emit_jump_insn (gen_simple_return_indirect_internal (ecx));
        }
      else
        emit_jump_insn (gen_simple_return_pop_internal (popc));
    }
  else
    emit_jump_insn (gen_simple_return_internal ());
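
  /* Schematically, the >= 64K case above ends the function with

         popl %ecx              # pop the return address
         addl $N, %esp          # explicitly pop N bytes of arguments
         jmp  *%ecx             # indirect return

     because "ret $imm" can only encode a 16-bit immediate.  */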
  /* Restore the state back to the state from the prologue,
     so that it's correct for the next epilogue.  */
  m->fs = frame_state_save;
}

/* Reset from the function's potential modifications.  */

static void
ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
                               HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  if (pic_offset_table_rtx)
    SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);

#if TARGET_MACHO
  /* Mach-O doesn't support labels at the end of objects, so if
     it looks like we might want one, insert a NOP.  */
  {
    rtx insn = get_last_insn ();
    rtx deleted_debug_label = NULL_RTX;
    while (insn
           && NOTE_P (insn)
           && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      {
        /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
           notes only; instead set their CODE_LABEL_NUMBER to -1,
           otherwise there would be code generation differences
           between -g and -g0.  */
        if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
          deleted_debug_label = insn;
        insn = PREV_INSN (insn);
      }
    if (insn
        && (LABEL_P (insn)
            || (NOTE_P (insn)
                && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
      fputs ("\tnop\n", file);
    else if (deleted_debug_label)
      for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
        if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
          CODE_LABEL_NUMBER (insn) = -1;
  }
#endif
}
/* Return a scratch register to use in the split stack prologue.  The
   split stack prologue is used for -fsplit-stack.  These are the first
   instructions in the function, even before the regular prologue.
   The scratch register can be any caller-saved register which is not
   used for parameters or for the static chain.  */

static unsigned int
split_stack_prologue_scratch_regno (void)
{
  if (TARGET_64BIT)
    return R11_REG;
  else
    {
      bool is_fastcall;
      int regparm;

      is_fastcall = (lookup_attribute ("fastcall",
                                       TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
                     != NULL);
      regparm = ix86_function_regparm (TREE_TYPE (cfun->decl), cfun->decl);

      if (is_fastcall)
        {
          if (DECL_STATIC_CHAIN (cfun->decl))
            {
              sorry ("-fsplit-stack does not support fastcall with "
                     "nested function");
              return INVALID_REGNUM;
            }
          return AX_REG;
        }
      else if (regparm < 3)
        {
          if (!DECL_STATIC_CHAIN (cfun->decl))
            return CX_REG;
          else
            {
              if (regparm >= 2)
                {
                  sorry ("-fsplit-stack does not support 2 register "
                         "parameters for a nested function");
                  return INVALID_REGNUM;
                }
              return DX_REG;
            }
        }
      else
        {
          /* FIXME: We could make this work by pushing a register
             around the addition and comparison.  */
          sorry ("-fsplit-stack does not support 3 register parameters");
          return INVALID_REGNUM;
        }
    }
}

/* A SYMBOL_REF for the function which allocates new stack space for
   -fsplit-stack.  */

static GTY(()) rtx split_stack_fn;

/* A SYMBOL_REF for the more-stack function when using the large code
   model.  */

static GTY(()) rtx split_stack_fn_large;

/* Handle -fsplit-stack.  These are the first instructions in the
   function, even before the regular prologue.  */

void
ix86_expand_split_stack_prologue (void)
{
  struct ix86_frame frame;
  HOST_WIDE_INT allocate;
  unsigned HOST_WIDE_INT args_size;
  rtx label, limit, current, jump_insn, allocate_rtx, call_insn, call_fusage;
  rtx scratch_reg = NULL_RTX;
  rtx varargs_label = NULL_RTX;
  rtx fn;

  gcc_assert (flag_split_stack && reload_completed);

  ix86_finalize_stack_realign_flags ();
  ix86_compute_frame_layout (&frame);
  allocate = frame.stack_pointer_offset - INCOMING_FRAME_SP_OFFSET;

  /* This is the label we will branch to if we have enough stack
     space.  We expect the basic block reordering pass to reverse this
     branch if optimizing, so that we branch in the unlikely case.  */
  label = gen_label_rtx ();

  /* We need to compare the stack pointer minus the frame size with
     the stack boundary in the TCB.  The stack boundary always gives
     us SPLIT_STACK_AVAILABLE bytes, so if we need less than that we
     can compare directly.  Otherwise we need to do an addition.  */

  limit = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                          UNSPEC_STACK_CHECK);
  limit = gen_rtx_CONST (Pmode, limit);
  limit = gen_rtx_MEM (Pmode, limit);
  if (allocate < SPLIT_STACK_AVAILABLE)
    current = stack_pointer_rtx;
  else
    {
      unsigned int scratch_regno;
      rtx offset;

      /* We need a scratch register to hold the stack pointer minus
         the required frame size.  Since this is the very start of the
         function, the scratch register can be any caller-saved
         register which is not used for parameters.  */
      offset = GEN_INT (- allocate);
      scratch_regno = split_stack_prologue_scratch_regno ();
      if (scratch_regno == INVALID_REGNUM)
        return;
      scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
      if (!TARGET_64BIT || x86_64_immediate_operand (offset, Pmode))
        {
          /* We don't use ix86_gen_add3 in this case because it will
             want to split to lea, but when not optimizing the insn
             will not be split after this point.  */
          emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
                                  gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                                offset)));
        }
      else
        {
          emit_move_insn (scratch_reg, offset);
          emit_insn (gen_adddi3 (scratch_reg, scratch_reg,
                                 stack_pointer_rtx));
        }
      current = scratch_reg;
    }

  ix86_expand_branch (GEU, current, limit, label);
  jump_insn = get_last_insn ();
  JUMP_LABEL (jump_insn) = label;

  /* Mark the jump as very likely to be taken.  */
  add_reg_note (jump_insn, REG_BR_PROB,
                GEN_INT (REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100));
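
  /* Schematically (64-bit, glibc-style TCB), the comparison emitted
     above becomes something like

         cmpq %fs:<boundary-offset>, %rsp   # or a scratch register
         jae  .Lenough_stack

     where the segment and offset behind UNSPEC_STACK_CHECK are
     target-dependent.  */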
  if (split_stack_fn == NULL_RTX)
    split_stack_fn = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
  fn = split_stack_fn;

  /* Get more stack space.  We pass in the desired stack space and the
     size of the arguments to copy to the new stack.  In 32-bit mode
     we push the parameters; __morestack will return on a new stack
     anyhow.  In 64-bit mode we pass the parameters in r10 and
     r11.  */
  allocate_rtx = GEN_INT (allocate);
  args_size = crtl->args.size >= 0 ? crtl->args.size : 0;
  call_fusage = NULL_RTX;
  if (TARGET_64BIT)
    {
      rtx reg10, reg11;

      reg10 = gen_rtx_REG (Pmode, R10_REG);
      reg11 = gen_rtx_REG (Pmode, R11_REG);

      /* If this function uses a static chain, it will be in %r10.
         Preserve it across the call to __morestack.  */
      if (DECL_STATIC_CHAIN (cfun->decl))
        {
          rtx rax;

          rax = gen_rtx_REG (Pmode, AX_REG);
          emit_move_insn (rax, reg10);
          use_reg (&call_fusage, rax);
        }

      if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
        {
          HOST_WIDE_INT argval;

          /* When using the large model we need to load the address
             into a register, and we've run out of registers.  So we
             switch to a different calling convention, and we call a
             different function: __morestack_large.  We pass the
             argument size in the upper 32 bits of r10 and pass the
             frame size in the lower 32 bits.  */
          gcc_assert ((allocate & (HOST_WIDE_INT) 0xffffffff) == allocate);
          gcc_assert ((args_size & 0xffffffff) == args_size);

          if (split_stack_fn_large == NULL_RTX)
            split_stack_fn_large =
              gen_rtx_SYMBOL_REF (Pmode, "__morestack_large_model");

          if (ix86_cmodel == CM_LARGE_PIC)
            {
              rtx x;

              label = gen_label_rtx ();
              emit_label (label);
              LABEL_PRESERVE_P (label) = 1;
              emit_insn (gen_set_rip_rex64 (reg10, label));
              emit_insn (gen_set_got_offset_rex64 (reg11, label));
              emit_insn (gen_adddi3 (reg10, reg10, reg11));
              x = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, split_stack_fn_large),
                                  UNSPEC_GOT);
              x = gen_rtx_CONST (Pmode, x);
              emit_move_insn (reg11, x);
              x = gen_rtx_PLUS (Pmode, reg10, reg11);
              x = gen_const_mem (Pmode, x);
              emit_move_insn (reg11, x);
            }
          else
            emit_move_insn (reg11, split_stack_fn_large);

          fn = reg11;

          argval = ((args_size << 16) << 16) + allocate;
          emit_move_insn (reg10, GEN_INT (argval));
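
          /* Worked example of the packing above: for args_size == 32
             and allocate == 0x120, argval is (32 << 32) + 0x120, so
             %r10 holds 0x0000002000000120 -- the argument size in the
             high half and the frame size in the low half.  */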
        }
      else
        {
          emit_move_insn (reg10, allocate_rtx);
          emit_move_insn (reg11, GEN_INT (args_size));
          use_reg (&call_fusage, reg11);
        }

      use_reg (&call_fusage, reg10);
    }
  else
    {
      emit_insn (gen_push (GEN_INT (args_size)));
      emit_insn (gen_push (allocate_rtx));
    }
  call_insn = ix86_expand_call (NULL_RTX, gen_rtx_MEM (QImode, fn),
                                GEN_INT (UNITS_PER_WORD), constm1_rtx,
                                NULL_RTX, false);
  add_function_usage_to (call_insn, call_fusage);

  /* In order to make call/return prediction work right, we now need
     to execute a return instruction.  See
     libgcc/config/i386/morestack.S for the details on how this works.

     For flow purposes gcc must not see this as a return
     instruction--we need control flow to continue at the subsequent
     label.  Therefore, we use an unspec.  */
  gcc_assert (crtl->args.pops_args < 65536);
  emit_insn (gen_split_stack_return (GEN_INT (crtl->args.pops_args)));

  /* If we are in 64-bit mode and this function uses a static chain,
     we saved %r10 in %rax before calling __morestack.  */
  if (TARGET_64BIT && DECL_STATIC_CHAIN (cfun->decl))
    emit_move_insn (gen_rtx_REG (Pmode, R10_REG),
                    gen_rtx_REG (Pmode, AX_REG));

  /* If this function calls va_start, we need to store a pointer to
     the arguments on the old stack, because they may not have been
     all copied to the new stack.  At this point the old stack can be
     found at the frame pointer value used by __morestack, because
     __morestack has set that up before calling back to us.  Here we
     store that pointer in a scratch register, and in
     ix86_expand_prologue we store the scratch register in a stack
     slot.  */
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      unsigned int scratch_regno;
      rtx frame_reg;
      int words;

      scratch_regno = split_stack_prologue_scratch_regno ();
      scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
      frame_reg = gen_rtx_REG (Pmode, BP_REG);

      /* 64-bit:
         fp -> old fp
               return address within this function
               return address of caller of this function
               stack arguments

         So we add three words to get to the stack arguments.

         32-bit:
         fp -> old fp
               return address within this function
               first argument to __morestack
               second argument to __morestack
               return address of caller of this function
               stack arguments

         So we add five words to get to the stack arguments.  */
      words = TARGET_64BIT ? 3 : 5;
      emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
                              gen_rtx_PLUS (Pmode, frame_reg,
                                            GEN_INT (words * UNITS_PER_WORD))));

      varargs_label = gen_label_rtx ();
      emit_jump_insn (gen_jump (varargs_label));
      JUMP_LABEL (get_last_insn ()) = varargs_label;
      emit_barrier ();
    }

  emit_label (label);
  LABEL_NUSES (label) = 1;

  /* If this function calls va_start, we now have to set the scratch
     register for the case where we do not call __morestack.  In this
     case we need to set it based on the stack pointer.  */
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
                              gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                            GEN_INT (UNITS_PER_WORD))));

      emit_label (varargs_label);
      LABEL_NUSES (varargs_label) = 1;
    }
}

/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a scratch register.  */

static void
ix86_live_on_entry (bitmap regs)
{
  if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
    {
      gcc_assert (flag_split_stack);
      bitmap_set_bit (regs, split_stack_prologue_scratch_regno ());
    }
}
/* Determine if OP is a suitable SUBREG RTX for an address.  */

static bool
ix86_address_subreg_operand (rtx op)
{
  enum machine_mode mode;

  if (!REG_P (op))
    return false;

  mode = GET_MODE (op);

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  /* Don't allow SUBREGs that span more than a word.  It can lead to spill
     failures when the register is one word out of a two word structure.  */
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
    return false;

  /* Allow only SUBREGs of non-eliminable hard registers.  */
  return register_no_elim_operand (op, mode);
}

/* Extract the parts of an RTL expression that is a valid memory address
   for an instruction.  Return 0 if the structure of the address is
   grossly off.  Return -1 if the address contains ASHIFT, so it is not
   strictly valid, but still used for computing the length of an lea
   instruction.  */

static int
ix86_decompose_address (rtx addr, struct ix86_address *out)
{
  rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
  rtx base_reg, index_reg;
  HOST_WIDE_INT scale = 1;
  rtx scale_rtx = NULL_RTX;
  rtx tmp;
  int retval = 1;
  enum ix86_address_seg seg = SEG_DEFAULT;

  /* Allow zero-extended SImode addresses,
     they will be emitted with addr32 prefix.  */
  if (TARGET_64BIT && GET_MODE (addr) == DImode)
    {
      if (GET_CODE (addr) == ZERO_EXTEND
          && GET_MODE (XEXP (addr, 0)) == SImode)
        addr = XEXP (addr, 0);
      else if (GET_CODE (addr) == AND
               && const_32bit_mask (XEXP (addr, 1), DImode))
        {
          addr = XEXP (addr, 0);

          /* Strip subreg.  */
          if (GET_CODE (addr) == SUBREG
              && GET_MODE (SUBREG_REG (addr)) == SImode)
            addr = SUBREG_REG (addr);
        }
    }

  if (REG_P (addr))
    base = addr;
  else if (GET_CODE (addr) == SUBREG)
    {
      if (ix86_address_subreg_operand (SUBREG_REG (addr)))
        base = addr;
      else
        return 0;
    }
  else if (GET_CODE (addr) == PLUS)
    {
      rtx addends[4], op;
      int n = 0, i;

      op = addr;
      do
        {
          if (n >= 4)
            return 0;
          addends[n++] = XEXP (op, 1);
          op = XEXP (op, 0);
        }
      while (GET_CODE (op) == PLUS);
      if (n >= 4)
        return 0;
      addends[n++] = op;

      for (i = n - 1; i >= 0; --i)
        {
          op = addends[i];
          switch (GET_CODE (op))
            {
            case MULT:
              if (index)
                return 0;
              index = XEXP (op, 0);
              scale_rtx = XEXP (op, 1);
              break;

            case ASHIFT:
              if (index)
                return 0;
              index = XEXP (op, 0);
              tmp = XEXP (op, 1);
              if (!CONST_INT_P (tmp))
                return 0;
              scale = INTVAL (tmp);
              if ((unsigned HOST_WIDE_INT) scale > 3)
                return 0;
              scale = 1 << scale;
              break;

            case UNSPEC:
              if (XINT (op, 1) == UNSPEC_TP
                  && TARGET_TLS_DIRECT_SEG_REFS
                  && seg == SEG_DEFAULT)
                seg = TARGET_64BIT ? SEG_FS : SEG_GS;
              else
                return 0;
              break;

            case SUBREG:
              if (!ix86_address_subreg_operand (SUBREG_REG (op)))
                return 0;
              /* FALLTHRU */

            case REG:
              if (!base)
                base = op;
              else if (!index)
                index = op;
              else
                return 0;
              break;

            case CONST:
            case CONST_INT:
            case SYMBOL_REF:
            case LABEL_REF:
              if (disp)
                return 0;
              disp = op;
              break;

            default:
              return 0;
            }
        }
    }
  else if (GET_CODE (addr) == MULT)
    {
      index = XEXP (addr, 0);           /* index*scale */
      scale_rtx = XEXP (addr, 1);
    }
  else if (GET_CODE (addr) == ASHIFT)
    {
      /* We're called for lea too, which implements ashift on occasion.  */
      index = XEXP (addr, 0);
      tmp = XEXP (addr, 1);
      if (!CONST_INT_P (tmp))
        return 0;
      scale = INTVAL (tmp);
      if ((unsigned HOST_WIDE_INT) scale > 3)
        return 0;
      scale = 1 << scale;
      retval = -1;
    }
  else
    disp = addr;                        /* displacement */

  if (index)
    {
      if (REG_P (index))
        ;
      else if (GET_CODE (index) == SUBREG
               && ix86_address_subreg_operand (SUBREG_REG (index)))
        ;
      else
        return 0;
    }

  /* Extract the integral value of scale.  */
  if (scale_rtx)
    {
      if (!CONST_INT_P (scale_rtx))
        return 0;
      scale = INTVAL (scale_rtx);
    }

  base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
  index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;

  /* Avoid useless 0 displacement.  */
  if (disp == const0_rtx && (base || index))
    disp = NULL_RTX;

  /* Allow arg pointer and stack pointer as index if there is no scaling.  */
  if (base_reg && index_reg && scale == 1
      && (index_reg == arg_pointer_rtx
          || index_reg == frame_pointer_rtx
          || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
    {
      rtx tmp;
      tmp = base, base = index, index = tmp;
      tmp = base_reg, base_reg = index_reg, index_reg = tmp;
    }

  /* Special case: %ebp cannot be encoded as a base without a displacement.
     Similarly %r13.  */
  if (!disp
      && base_reg
      && (base_reg == hard_frame_pointer_rtx
          || base_reg == frame_pointer_rtx
          || base_reg == arg_pointer_rtx
          || (REG_P (base_reg)
              && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
                  || REGNO (base_reg) == R13_REG))))
    disp = const0_rtx;

  /* Special case: on K6, [%esi] makes the instruction vector decoded.
     Avoid this by transforming to [%esi+0].
     Reload calls address legitimization without cfun defined, so we need
     to test cfun for being non-NULL.  */
  if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
      && base_reg && !index_reg && !disp
      && REG_P (base_reg) && REGNO (base_reg) == SI_REG)
    disp = const0_rtx;

  /* Special case: encode reg+reg instead of reg*2.  */
  if (!base && index && scale == 2)
    base = index, base_reg = index_reg, scale = 1;

  /* Special case: scaling cannot be encoded without base or displacement.  */
  if (!base && !disp && index && scale != 1)
    disp = const0_rtx;

  out->base = base;
  out->index = index;
  out->disp = disp;
  out->scale = scale;
  out->seg = seg;

  return retval;
}
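
/* Illustrative decompositions (not exhaustive):

       (plus:SI (reg:SI bx) (mult:SI (reg:SI cx) (const_int 4)))
         -> base %ebx, index %ecx, scale 4, no displacement
       (ashift:SI (reg:SI cx) (const_int 3))
         -> index %ecx, scale 8, retval -1 (lea-only form)
       (symbol_ref:SI "x")
         -> displacement only.  */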
/* Return the cost of the memory address X.
   For i386, it is better to use a complex address than let gcc copy
   the address into a reg and make a new pseudo.  But not if the address
   requires two regs - that would mean more pseudos with longer
   lifetimes.  */
static int
ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
{
  struct ix86_address parts;
  int cost = 1;
  int ok = ix86_decompose_address (x, &parts);

  gcc_assert (ok);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);

  /* Attempt to minimize number of registers in the address.  */
  if ((parts.base
       && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
      || (parts.index
          && (!REG_P (parts.index)
              || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
    cost++;

  if (parts.base
      && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
      && parts.index
      && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
      && parts.base != parts.index)
    cost++;

  /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
     since its predecode logic can't detect the length of instructions
     and it degenerates to vector decoded.  Increase the cost of such
     addresses here.  The penalty is minimally 2 cycles.  It may be
     worthwhile to split such addresses or even refuse them at all.

     The following addressing modes are affected:
      [base+scale*index]
      [scale*index+disp]
      [base+index]

     The first and last case may be avoidable by explicitly coding the zero
     into the memory address, but I don't have an AMD-K6 machine handy to
     check this theory.  */

  if (TARGET_K6
      && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
          || (parts.disp && !parts.base && parts.index && parts.scale != 1)
          || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
    cost += 10;

  return cost;
}

/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
   this is used to form addresses to local data when -fPIC is in
   effect.  */

static bool
darwin_local_data_pic (rtx disp)
{
  return (GET_CODE (disp) == UNSPEC
          && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
}

/* Determine if a given RTX is a valid constant.  We already know this
   satisfies CONSTANT_P.  */

static bool
ix86_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
        {
          if (!CONST_INT_P (XEXP (x, 1)))
            return false;
          x = XEXP (x, 0);
        }

      if (TARGET_MACHO && darwin_local_data_pic (x))
        return true;

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
        switch (XINT (x, 1))
          {
          case UNSPEC_GOT:
          case UNSPEC_GOTOFF:
          case UNSPEC_PLTOFF:
            return TARGET_64BIT;
          case UNSPEC_TPOFF:
          case UNSPEC_NTPOFF:
            x = XVECEXP (x, 0, 0);
            return (GET_CODE (x) == SYMBOL_REF
                    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
          case UNSPEC_DTPOFF:
            x = XVECEXP (x, 0, 0);
            return (GET_CODE (x) == SYMBOL_REF
                    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
          default:
            return false;
          }

      /* We must have drilled down to a symbol.  */
      if (GET_CODE (x) == LABEL_REF)
        return true;
      if (GET_CODE (x) != SYMBOL_REF)
        return false;
      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      if (SYMBOL_REF_TLS_MODEL (x))
        return false;

      /* DLLIMPORT symbols are never valid.  */
      if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
          && SYMBOL_REF_DLLIMPORT_P (x))
        return false;

#if TARGET_MACHO
      /* mdynamic-no-pic */
      if (MACHO_DYNAMIC_NO_PIC_P)
        return machopic_symbol_defined_p (x);
#endif
      break;

    case CONST_DOUBLE:
      if (GET_MODE (x) == TImode
          && x != CONST0_RTX (TImode)
          && !TARGET_64BIT)
        return false;
      break;

    case CONST_VECTOR:
      if (!standard_sse_constant_p (x))
        return false;

    default:
      break;
    }

  /* Otherwise we handle everything else in the move patterns.  */
  return true;
}
/* Determine if it's legal to put X into the constant pool.  This
   is not possible for the address of thread-local symbols, which
   is checked above.  */

static bool
ix86_cannot_force_const_mem (enum machine_mode mode, rtx x)
{
  /* We can always put integral constants and vectors in memory.  */
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      return false;

    default:
      break;
    }
  return !ix86_legitimate_constant_p (mode, x);
}

/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */

bool
legitimate_pic_operand_p (rtx x)
{
  rtx inner;

  switch (GET_CODE (x))
    {
    case CONST:
      inner = XEXP (x, 0);
      if (GET_CODE (inner) == PLUS
          && CONST_INT_P (XEXP (inner, 1)))
        inner = XEXP (inner, 0);

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (inner) == UNSPEC)
        switch (XINT (inner, 1))
          {
          case UNSPEC_GOT:
          case UNSPEC_GOTOFF:
          case UNSPEC_PLTOFF:
            return TARGET_64BIT;
          case UNSPEC_TPOFF:
            x = XVECEXP (inner, 0, 0);
            return (GET_CODE (x) == SYMBOL_REF
                    && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
          case UNSPEC_MACHOPIC_OFFSET:
            return legitimate_pic_address_disp_p (x);
          default:
            return false;
          }
      /* FALLTHRU */

    case SYMBOL_REF:
    case LABEL_REF:
      return legitimate_pic_address_disp_p (x);

    default:
      return true;
    }
}

/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */

bool
legitimate_pic_address_disp_p (rtx disp)
{
  bool saw_plus;

  /* In 64bit mode we can allow direct addresses of symbols and labels
     when they are not dynamic symbols.  */
  if (TARGET_64BIT)
    {
      rtx op0 = disp, op1;

      switch (GET_CODE (disp))
        {
        case LABEL_REF:
          return true;

        case CONST:
          if (GET_CODE (XEXP (disp, 0)) != PLUS)
            break;
          op0 = XEXP (XEXP (disp, 0), 0);
          op1 = XEXP (XEXP (disp, 0), 1);
          if (!CONST_INT_P (op1)
              || INTVAL (op1) >= 16*1024*1024
              || INTVAL (op1) < -16*1024*1024)
            break;
          if (GET_CODE (op0) == LABEL_REF)
            return true;
          if (GET_CODE (op0) != SYMBOL_REF)
            break;
          /* FALLTHRU */

        case SYMBOL_REF:
          /* TLS references should always be enclosed in UNSPEC.  */
          if (SYMBOL_REF_TLS_MODEL (op0))
            return false;
          if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
              && ix86_cmodel != CM_LARGE_PIC)
            return true;
          break;

        default:
          break;
        }
    }
  if (GET_CODE (disp) != CONST)
    return false;
  disp = XEXP (disp, 0);

  if (TARGET_64BIT)
    {
      /* It is unsafe to allow PLUS expressions; this would limit the
         allowed distance of GOT tables.  We should not need these
         anyway.  */
      if (GET_CODE (disp) != UNSPEC
          || (XINT (disp, 1) != UNSPEC_GOTPCREL
              && XINT (disp, 1) != UNSPEC_GOTOFF
              && XINT (disp, 1) != UNSPEC_PCREL
              && XINT (disp, 1) != UNSPEC_PLTOFF))
        return false;

      if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
          && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
        return false;
      return true;
    }

  saw_plus = false;
  if (GET_CODE (disp) == PLUS)
    {
      if (!CONST_INT_P (XEXP (disp, 1)))
        return false;
      disp = XEXP (disp, 0);
      saw_plus = true;
    }

  if (TARGET_MACHO && darwin_local_data_pic (disp))
    return true;

  if (GET_CODE (disp) != UNSPEC)
    return false;

  switch (XINT (disp, 1))
    {
    case UNSPEC_GOT:
      if (saw_plus)
        return false;
      /* We need to check for both symbols and labels because VxWorks loads
         text labels with @GOT rather than @GOTOFF.  See gotoff_operand for
         details.  */
      return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
              || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
    case UNSPEC_GOTOFF:
      /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
         While the ABI also specifies 32bit relocations, we don't produce
         them in the small PIC model at all.  */
      if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
           || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
          && !TARGET_64BIT)
        return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
      return false;
    case UNSPEC_GOTTPOFF:
    case UNSPEC_GOTNTPOFF:
    case UNSPEC_INDNTPOFF:
      if (saw_plus)
        return false;
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
              && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
    case UNSPEC_NTPOFF:
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
              && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
    case UNSPEC_DTPOFF:
      disp = XVECEXP (disp, 0, 0);
      return (GET_CODE (disp) == SYMBOL_REF
              && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
    }

  return false;
}
/* Recognizes RTL expressions that are valid memory addresses for an
   instruction.  The MODE argument is the machine mode for the MEM
   expression that wants to use this address.

   It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS
   should convert common non-canonical forms to canonical form so that
   they will be recognized.  */

static bool
ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
                           rtx addr, bool strict)
{
  struct ix86_address parts;
  rtx base, index, disp;
  HOST_WIDE_INT scale;

  if (ix86_decompose_address (addr, &parts) <= 0)
    /* Decomposition failed.  */
    return false;

  base = parts.base;
  index = parts.index;
  disp = parts.disp;
  scale = parts.scale;

  /* Validate base register.  */
  if (base)
    {
      rtx reg;

      if (REG_P (base))
        reg = base;
      else if (GET_CODE (base) == SUBREG && REG_P (SUBREG_REG (base)))
        reg = SUBREG_REG (base);
      else
        /* Base is not a register.  */
        return false;

      if (GET_MODE (base) != SImode && GET_MODE (base) != DImode)
        return false;

      if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
          || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
        /* Base is not valid.  */
        return false;
    }

  /* Validate index register.  */
  if (index)
    {
      rtx reg;

      if (REG_P (index))
        reg = index;
      else if (GET_CODE (index) == SUBREG && REG_P (SUBREG_REG (index)))
        reg = SUBREG_REG (index);
      else
        /* Index is not a register.  */
        return false;

      if (GET_MODE (index) != SImode && GET_MODE (index) != DImode)
        return false;

      if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
          || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
        /* Index is not valid.  */
        return false;
    }

  /* Index and base should have the same mode.  */
  if (base && index
      && GET_MODE (base) != GET_MODE (index))
    return false;

  /* Validate scale factor.  */
  if (scale != 1)
    {
      if (!index)
        /* Scale without index.  */
        return false;

      if (scale != 2 && scale != 4 && scale != 8)
        /* Scale is not a valid multiplier.  */
        return false;
    }

  /* Validate displacement.  */
  if (disp)
    {
      if (GET_CODE (disp) == CONST
          && GET_CODE (XEXP (disp, 0)) == UNSPEC
          && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
        switch (XINT (XEXP (disp, 0), 1))
          {
          /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit
             when used.  While the ABI also specifies 32bit relocations,
             we don't produce them at all and use IP relative instead.  */
          case UNSPEC_GOT:
          case UNSPEC_GOTOFF:
            gcc_assert (flag_pic);
            if (!TARGET_64BIT)
              goto is_legitimate_pic;

            /* 64bit address unspec.  */
            return false;

          case UNSPEC_GOTPCREL:
          case UNSPEC_PCREL:
            gcc_assert (flag_pic);
            goto is_legitimate_pic;

          case UNSPEC_GOTTPOFF:
          case UNSPEC_GOTNTPOFF:
          case UNSPEC_INDNTPOFF:
          case UNSPEC_NTPOFF:
          case UNSPEC_DTPOFF:
            break;

          case UNSPEC_STACK_CHECK:
            gcc_assert (flag_split_stack);
            break;

          default:
            /* Invalid address unspec.  */
            return false;
          }

      else if (SYMBOLIC_CONST (disp)
               && (flag_pic
                   || (TARGET_MACHO
#if TARGET_MACHO
                       && MACHOPIC_INDIRECT
                       && !machopic_operand_p (disp)
#endif
                       )))
        {

        is_legitimate_pic:
          if (TARGET_64BIT && (index || base))
            {
              /* foo@dtpoff(%rX) is ok.  */
              if (GET_CODE (disp) != CONST
                  || GET_CODE (XEXP (disp, 0)) != PLUS
                  || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
                  || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
                  || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
                      && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
                /* Non-constant pic memory reference.  */
                return false;
            }
          else if ((!TARGET_MACHO || flag_pic)
                   && ! legitimate_pic_address_disp_p (disp))
            /* Displacement is an invalid pic construct.  */
            return false;
#if TARGET_MACHO
          else if (MACHO_DYNAMIC_NO_PIC_P
                   && !ix86_legitimate_constant_p (Pmode, disp))
            /* Displacement must be referenced via non_lazy_pointer.  */
            return false;
#endif

          /* This code used to verify that a symbolic pic displacement
             includes the pic_offset_table_rtx register.

             While this is a good idea, unfortunately these constructs may
             be created by "adds using lea" optimization for incorrect
             code like:

             int a;
             int foo (int i)
               {
                 return *(&a + i);
               }

             This code is nonsensical, but results in addressing the
             GOT table with the pic_offset_table_rtx base.  We can't
             just refuse it easily, since it gets matched by the
             "addsi3" pattern, which later gets split to lea in case
             the output register differs from the input.  While this
             could be handled by a separate addsi pattern for this case
             that never results in lea, disabling this test seems to be
             the easier and correct fix for the crash.  */
        }
      else if (GET_CODE (disp) != LABEL_REF
               && !CONST_INT_P (disp)
               && (GET_CODE (disp) != CONST
                   || !ix86_legitimate_constant_p (Pmode, disp))
               && (GET_CODE (disp) != SYMBOL_REF
                   || !ix86_legitimate_constant_p (Pmode, disp)))
        /* Displacement is not constant.  */
        return false;
      else if (TARGET_64BIT
               && !x86_64_immediate_operand (disp, VOIDmode))
        /* Displacement is out of range.  */
        return false;
    }

  /* Everything looks valid.  */
  return true;
}

/* Determine if a given RTX is a valid constant address.  */

bool
constant_address_p (rtx x)
{
  return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
}

/* Return a unique alias set for the GOT.  */

static alias_set_type
ix86_GOT_alias_set (void)
{
  static alias_set_type set = -1;
  if (set == -1)
    set = new_alias_set ();
  return set;
}
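
/* Illustrative 32-bit PIC sequences produced by the legitimization
   below, assuming %ebx holds the PIC register: a global symbol is
   loaded from the GOT,

       movl foo@GOT(%ebx), %eax    # address of foo

   while a local symbol is formed as an offset from the GOT base:

       leal bar@GOTOFF(%ebx), %eax  */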
12133 /* Return a legitimate reference for ORIG (an address) using the
12134 register REG. If REG is 0, a new pseudo is generated.
12136 There are two types of references that must be handled:
12138 1. Global data references must load the address from the GOT, via
12139 the PIC reg. An insn is emitted to do this load, and the reg is
12142 2. Static data references, constant pool addresses, and code labels
12143 compute the address as an offset from the GOT, whose base is in
12144 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
12145 differentiate them from global data objects. The returned
12146 address is the PIC reg + an unspec constant.
12148 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
12149 reg also appears in the address. */
12152 legitimize_pic_address (rtx orig, rtx reg)
12155 rtx new_rtx = orig;
12159 if (TARGET_MACHO && !TARGET_64BIT)
12162 reg = gen_reg_rtx (Pmode);
12163 /* Use the generic Mach-O PIC machinery. */
12164 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
12168 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
12170 else if (TARGET_64BIT
12171 && ix86_cmodel != CM_SMALL_PIC
12172 && gotoff_operand (addr, Pmode))
12175 /* This symbol may be referenced via a displacement from the PIC
12176 base address (@GOTOFF). */
12178 if (reload_in_progress)
12179 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12180 if (GET_CODE (addr) == CONST)
12181 addr = XEXP (addr, 0);
12182 if (GET_CODE (addr) == PLUS)
12184 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12186 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12189 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12190 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12192 tmpreg = gen_reg_rtx (Pmode);
12195 emit_move_insn (tmpreg, new_rtx);
12199 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
12200 tmpreg, 1, OPTAB_DIRECT);
12203 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
12205 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
12207 /* This symbol may be referenced via a displacement from the PIC
12208 base address (@GOTOFF). */
12210 if (reload_in_progress)
12211 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12212 if (GET_CODE (addr) == CONST)
12213 addr = XEXP (addr, 0);
12214 if (GET_CODE (addr) == PLUS)
12216 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
12218 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
12221 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
12222 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12223 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12227 emit_move_insn (reg, new_rtx);
12231 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
12232 /* We can't use @GOTOFF for text labels on VxWorks;
12233 see gotoff_operand. */
12234 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
12236 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12238 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
12239 return legitimize_dllimport_symbol (addr, true);
12240 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
12241 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
12242 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
12244 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
12245 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
12249 /* For x64 PE-COFF there is no GOT table. So we use the address
12250 directly. */
12251 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
12253 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_PCREL);
12254 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12257 reg = gen_reg_rtx (Pmode);
12258 emit_move_insn (reg, new_rtx);
12261 else if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
12263 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
12264 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12265 new_rtx = gen_const_mem (Pmode, new_rtx);
12266 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12269 reg = gen_reg_rtx (Pmode);
12270 /* Use gen_movsi directly; otherwise the address is loaded
12271 into a register for CSE. We don't want to CSE this address;
12272 instead we CSE addresses loaded from the GOT table, so skip this. */
12273 emit_insn (gen_movsi (reg, new_rtx));
12278 /* This symbol must be referenced via a load from the
12279 Global Offset Table (@GOT). */
12281 if (reload_in_progress)
12282 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12283 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
12284 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12286 new_rtx = force_reg (Pmode, new_rtx);
12287 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12288 new_rtx = gen_const_mem (Pmode, new_rtx);
12289 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12292 reg = gen_reg_rtx (Pmode);
12293 emit_move_insn (reg, new_rtx);
12299 if (CONST_INT_P (addr)
12300 && !x86_64_immediate_operand (addr, VOIDmode))
12304 emit_move_insn (reg, addr);
12308 new_rtx = force_reg (Pmode, addr);
12310 else if (GET_CODE (addr) == CONST)
12312 addr = XEXP (addr, 0);
12314 /* We must match the forms we generated above. Assume the only
12315 unspecs that can get here are ours; not that we could do
12316 anything with them anyway.... */
12317 if (GET_CODE (addr) == UNSPEC
12318 || (GET_CODE (addr) == PLUS
12319 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
12321 gcc_assert (GET_CODE (addr) == PLUS);
12323 if (GET_CODE (addr) == PLUS)
12325 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
12327 /* Check first to see if this is a constant offset from a @GOTOFF
12328 symbol reference. */
12329 if (gotoff_operand (op0, Pmode)
12330 && CONST_INT_P (op1))
12334 if (reload_in_progress)
12335 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12336 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
12338 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
12339 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12340 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12344 emit_move_insn (reg, new_rtx);
12350 if (INTVAL (op1) < -16*1024*1024
12351 || INTVAL (op1) >= 16*1024*1024)
12353 if (!x86_64_immediate_operand (op1, Pmode))
12354 op1 = force_reg (Pmode, op1);
12355 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
12361 base = legitimize_pic_address (XEXP (addr, 0), reg);
12362 new_rtx = legitimize_pic_address (XEXP (addr, 1),
12363 base == reg ? NULL_RTX : reg);
12365 if (CONST_INT_P (new_rtx))
12366 new_rtx = plus_constant (base, INTVAL (new_rtx));
12369 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
12371 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
12372 new_rtx = XEXP (new_rtx, 1);
12374 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
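
/* Illustrative sketch, not GCC code: the two 32-bit PIC reference styles
   constructed above, written as the AT&T templates they correspond to.
   The helper name and the symbol "sym" are hypothetical.  */

static const char *
pic_ref_style_example (int symbol_is_local)
{
  /* Local symbols: the PIC reg plus a @GOTOFF displacement computes the
     address directly.  Global symbols: the address itself is loaded
     from the GOT slot at PIC reg + @GOT.  */
  return symbol_is_local
	 ? "leal sym@GOTOFF(%ebx), %eax"
	 : "movl sym@GOT(%ebx), %eax";
}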
12382 /* Load the thread pointer. If TO_REG is true, force it into a register. */
12385 get_thread_pointer (bool to_reg)
12387 rtx tp = gen_rtx_UNSPEC (ptr_mode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
12389 if (GET_MODE (tp) != Pmode)
12390 tp = convert_to_mode (Pmode, tp, 1);
12393 tp = copy_addr_to_reg (tp);
12398 /* Construct the SYMBOL_REF for the tls_get_addr function. */
12400 static GTY(()) rtx ix86_tls_symbol;
12403 ix86_tls_get_addr (void)
12405 if (!ix86_tls_symbol)
12408 = ((TARGET_ANY_GNU_TLS && !TARGET_64BIT)
12409 ? "___tls_get_addr" : "__tls_get_addr");
12411 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, sym);
12414 return ix86_tls_symbol;
12417 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
12419 static GTY(()) rtx ix86_tls_module_base_symbol;
12422 ix86_tls_module_base (void)
12424 if (!ix86_tls_module_base_symbol)
12426 ix86_tls_module_base_symbol
12427 = gen_rtx_SYMBOL_REF (Pmode, "_TLS_MODULE_BASE_");
12429 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
12430 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
12433 return ix86_tls_module_base_symbol;
12436 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
12437 false if we expect this to be used for a memory address and true if
12438 we expect to load the address into a register. */
12441 legitimize_tls_address (rtx x, enum tls_model model, bool for_mov)
12443 rtx dest, base, off;
12444 rtx pic = NULL_RTX, tp = NULL_RTX;
12449 case TLS_MODEL_GLOBAL_DYNAMIC:
12450 dest = gen_reg_rtx (Pmode);
12455 pic = pic_offset_table_rtx;
12458 pic = gen_reg_rtx (Pmode);
12459 emit_insn (gen_set_got (pic));
12463 if (TARGET_GNU2_TLS)
12466 emit_insn (gen_tls_dynamic_gnu2_64 (dest, x));
12468 emit_insn (gen_tls_dynamic_gnu2_32 (dest, x, pic));
12470 tp = get_thread_pointer (true);
12471 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
12473 set_unique_reg_note (get_last_insn (), REG_EQUAL, x);
12477 rtx caddr = ix86_tls_get_addr ();
12481 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
12484 emit_call_insn (gen_tls_global_dynamic_64 (rax, x, caddr));
12485 insns = get_insns ();
12488 RTL_CONST_CALL_P (insns) = 1;
12489 emit_libcall_block (insns, dest, rax, x);
12492 emit_insn (gen_tls_global_dynamic_32 (dest, x, pic, caddr));
12496 case TLS_MODEL_LOCAL_DYNAMIC:
12497 base = gen_reg_rtx (Pmode);
12502 pic = pic_offset_table_rtx;
12505 pic = gen_reg_rtx (Pmode);
12506 emit_insn (gen_set_got (pic));
12510 if (TARGET_GNU2_TLS)
12512 rtx tmp = ix86_tls_module_base ();
12515 emit_insn (gen_tls_dynamic_gnu2_64 (base, tmp));
12517 emit_insn (gen_tls_dynamic_gnu2_32 (base, tmp, pic));
12519 tp = get_thread_pointer (true);
12520 set_unique_reg_note (get_last_insn (), REG_EQUAL,
12521 gen_rtx_MINUS (Pmode, tmp, tp));
12525 rtx caddr = ix86_tls_get_addr ();
12529 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, eqv;
12532 emit_call_insn (gen_tls_local_dynamic_base_64 (rax, caddr));
12533 insns = get_insns ();
12536 /* Attach a unique REG_EQUAL, to allow the RTL optimizers to
12537 share the LD_BASE result with other LD model accesses. */
12538 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
12539 UNSPEC_TLS_LD_BASE);
12541 RTL_CONST_CALL_P (insns) = 1;
12542 emit_libcall_block (insns, base, rax, eqv);
12545 emit_insn (gen_tls_local_dynamic_base_32 (base, pic, caddr));
12548 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
12549 off = gen_rtx_CONST (Pmode, off);
12551 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
12553 if (TARGET_GNU2_TLS)
12555 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
12557 set_unique_reg_note (get_last_insn (), REG_EQUAL, x);
12561 case TLS_MODEL_INITIAL_EXEC:
12564 if (TARGET_SUN_TLS)
12566 /* The Sun linker took the AMD64 TLS spec literally
12567 and can only handle %rax as the destination of the
12568 initial-exec code sequence. */
12570 dest = gen_reg_rtx (Pmode);
12571 emit_insn (gen_tls_initial_exec_64_sun (dest, x));
12576 type = UNSPEC_GOTNTPOFF;
12580 if (reload_in_progress)
12581 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12582 pic = pic_offset_table_rtx;
12583 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
12585 else if (!TARGET_ANY_GNU_TLS)
12587 pic = gen_reg_rtx (Pmode);
12588 emit_insn (gen_set_got (pic));
12589 type = UNSPEC_GOTTPOFF;
12594 type = UNSPEC_INDNTPOFF;
12597 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
12598 off = gen_rtx_CONST (Pmode, off);
12600 off = gen_rtx_PLUS (Pmode, pic, off);
12601 off = gen_const_mem (Pmode, off);
12602 set_mem_alias_set (off, ix86_GOT_alias_set ());
12604 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12606 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12607 off = force_reg (Pmode, off);
12608 return gen_rtx_PLUS (Pmode, base, off);
12612 base = get_thread_pointer (true);
12613 dest = gen_reg_rtx (Pmode);
12614 emit_insn (gen_subsi3 (dest, base, off));
12618 case TLS_MODEL_LOCAL_EXEC:
12619 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
12620 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12621 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
12622 off = gen_rtx_CONST (Pmode, off);
12624 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12626 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12627 return gen_rtx_PLUS (Pmode, base, off);
12631 base = get_thread_pointer (true);
12632 dest = gen_reg_rtx (Pmode);
12633 emit_insn (gen_subsi3 (dest, base, off));
12638 gcc_unreachable ();
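
/* Illustrative sketch, not GCC code: the x86-64 access sequence each TLS
   model handled above reduces to (32-bit uses %gs and the @gotntpoff /
   @ntpoff forms).  The symbol "x" and the helper name are hypothetical.  */

static const char *
tls_sequence_example (int model)
{
  switch (model)
    {
    case 0:	/* global dynamic: address computed by __tls_get_addr */
      return "leaq x@tlsgd(%rip), %rdi; call __tls_get_addr@PLT";
    case 1:	/* initial exec: TP offset loaded from the GOT */
      return "movq x@gottpoff(%rip), %rax; movq %fs:(%rax), %rax";
    case 2:	/* local exec: link-time constant offset from TP */
      return "movq %fs:x@tpoff, %rax";
    }
  return 0;
}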
12644 /* Create or return the unique __imp_DECL dllimport symbol corresponding
12645 to symbol DECL. */
12647 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
12648 htab_t dllimport_map;
12651 get_dllimport_decl (tree decl)
12653 struct tree_map *h, in;
12656 const char *prefix;
12657 size_t namelen, prefixlen;
12662 if (!dllimport_map)
12663 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
12665 in.hash = htab_hash_pointer (decl);
12666 in.base.from = decl;
12667 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
12668 h = (struct tree_map *) *loc;
12672 *loc = h = ggc_alloc_tree_map ();
12674 h->base.from = decl;
12675 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
12676 VAR_DECL, NULL, ptr_type_node);
12677 DECL_ARTIFICIAL (to) = 1;
12678 DECL_IGNORED_P (to) = 1;
12679 DECL_EXTERNAL (to) = 1;
12680 TREE_READONLY (to) = 1;
12682 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
12683 name = targetm.strip_name_encoding (name);
12684 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
12685 ? "*__imp_" : "*__imp__";
12686 namelen = strlen (name);
12687 prefixlen = strlen (prefix);
12688 imp_name = (char *) alloca (namelen + prefixlen + 1);
12689 memcpy (imp_name, prefix, prefixlen);
12690 memcpy (imp_name + prefixlen, name, namelen + 1);
12692 name = ggc_alloc_string (imp_name, namelen + prefixlen);
12693 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
12694 SET_SYMBOL_REF_DECL (rtl, to);
12695 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
12697 rtl = gen_const_mem (Pmode, rtl);
12698 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
12700 SET_DECL_RTL (to, rtl);
12701 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
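
/* Minimal standalone sketch, not the GCC logic verbatim, of the name
   mangling above, assuming FASTCALL_PREFIX is '@'.  Fastcall names and
   no-underscore ABIs get "__imp_", underscore ABIs get "__imp__"; the
   leading '*' tells the assembler the name is already encoded.  BUF is
   assumed large enough.  */

static void
imp_name_example (const char *name, int underscore_abi, char *buf)
{
  const char *prefix
    = (name[0] == '@' || !underscore_abi) ? "*__imp_" : "*__imp__";

  while (*prefix)
    *buf++ = *prefix++;
  while (*name)
    *buf++ = *name++;
  *buf = '\0';
}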
12706 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
12707 true if we require the result be a register. */
12710 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
12715 gcc_assert (SYMBOL_REF_DECL (symbol));
12716 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
12718 x = DECL_RTL (imp_decl);
12720 x = force_reg (Pmode, x);
12724 /* Try machine-dependent ways of modifying an illegitimate address
12725 to be legitimate. If we find one, return the new, valid address.
12726 This macro is used in only one place: `memory_address' in explow.c.
12728 OLDX is the address as it was before break_out_memory_refs was called.
12729 In some cases it is useful to look at this to decide what needs to be done.
12731 It is always safe for this macro to do nothing. It exists to recognize
12732 opportunities to optimize the output.
12734 For the 80386, we handle X+REG by loading X into a register R and
12735 using R+REG. R will go in a general reg and indexing will be used.
12736 However, if REG is a broken-out memory address or multiplication,
12737 nothing needs to be done because REG can certainly go in a general reg.
12739 When -fpic is used, special handling is needed for symbolic references.
12740 See comments by legitimize_pic_address in i386.c for details. */
12743 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
12744 enum machine_mode mode)
12749 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
12751 return legitimize_tls_address (x, (enum tls_model) log, false);
12752 if (GET_CODE (x) == CONST
12753 && GET_CODE (XEXP (x, 0)) == PLUS
12754 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12755 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
12757 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
12758 (enum tls_model) log, false);
12759 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
12762 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12764 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
12765 return legitimize_dllimport_symbol (x, true);
12766 if (GET_CODE (x) == CONST
12767 && GET_CODE (XEXP (x, 0)) == PLUS
12768 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12769 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
12771 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
12772 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
12776 if (flag_pic && SYMBOLIC_CONST (x))
12777 return legitimize_pic_address (x, 0);
12780 if (MACHO_DYNAMIC_NO_PIC_P && SYMBOLIC_CONST (x))
12781 return machopic_indirect_data_reference (x, 0);
12784 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
12785 if (GET_CODE (x) == ASHIFT
12786 && CONST_INT_P (XEXP (x, 1))
12787 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
12790 log = INTVAL (XEXP (x, 1));
12791 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
12792 GEN_INT (1 << log));
12795 if (GET_CODE (x) == PLUS)
12797 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
12799 if (GET_CODE (XEXP (x, 0)) == ASHIFT
12800 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
12801 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
12804 log = INTVAL (XEXP (XEXP (x, 0), 1));
12805 XEXP (x, 0) = gen_rtx_MULT (Pmode,
12806 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
12807 GEN_INT (1 << log));
12810 if (GET_CODE (XEXP (x, 1)) == ASHIFT
12811 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
12812 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
12815 log = INTVAL (XEXP (XEXP (x, 1), 1));
12816 XEXP (x, 1) = gen_rtx_MULT (Pmode,
12817 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
12818 GEN_INT (1 << log));
12821 /* Put multiply first if it isn't already. */
12822 if (GET_CODE (XEXP (x, 1)) == MULT)
12824 rtx tmp = XEXP (x, 0);
12825 XEXP (x, 0) = XEXP (x, 1);
12826 XEXP (x, 1) = tmp;
12830 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
12831 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
12832 created by virtual register instantiation, register elimination, and
12833 similar optimizations. */
12834 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
12837 x = gen_rtx_PLUS (Pmode,
12838 gen_rtx_PLUS (Pmode, XEXP (x, 0),
12839 XEXP (XEXP (x, 1), 0)),
12840 XEXP (XEXP (x, 1), 1));
12843 /* Canonicalize
12844 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
12845 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
12846 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
12847 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
12848 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
12849 && CONSTANT_P (XEXP (x, 1)))
12852 rtx other = NULL_RTX;
12854 if (CONST_INT_P (XEXP (x, 1)))
12856 constant = XEXP (x, 1);
12857 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
12859 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
12861 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
12862 other = XEXP (x, 1);
12870 x = gen_rtx_PLUS (Pmode,
12871 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
12872 XEXP (XEXP (XEXP (x, 0), 1), 0)),
12873 plus_constant (other, INTVAL (constant)));
12877 if (changed && ix86_legitimate_address_p (mode, x, false))
12880 if (GET_CODE (XEXP (x, 0)) == MULT)
12883 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
12886 if (GET_CODE (XEXP (x, 1)) == MULT)
12889 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
12893 && REG_P (XEXP (x, 1))
12894 && REG_P (XEXP (x, 0)))
12897 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
12900 x = legitimize_pic_address (x, 0);
12903 if (changed && ix86_legitimate_address_p (mode, x, false))
12906 if (REG_P (XEXP (x, 0)))
12908 rtx temp = gen_reg_rtx (Pmode);
12909 rtx val = force_operand (XEXP (x, 1), temp);
12912 if (GET_MODE (val) != Pmode)
12913 val = convert_to_mode (Pmode, val, 1);
12914 emit_move_insn (temp, val);
12917 XEXP (x, 1) = temp;
12921 else if (REG_P (XEXP (x, 1)))
12923 rtx temp = gen_reg_rtx (Pmode);
12924 rtx val = force_operand (XEXP (x, 0), temp);
12927 if (GET_MODE (val) != Pmode)
12928 val = convert_to_mode (Pmode, val, 1);
12929 emit_move_insn (temp, val);
12932 XEXP (x, 0) = temp;
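
/* Standalone sketch, not GCC code, of the shift-to-multiply
   canonicalization above: an address term (x << n) with n in 0..3 is
   rewritten as x * (1 << n), matching the SIB scales 1, 2, 4 and 8.  */

static int
ashift_to_scale_example (int shift_count)
{
  if (shift_count < 0 || shift_count >= 4)
    return -1;			/* not a legitimate address scale */
  return 1 << shift_count;	/* 1, 2, 4 or 8 */
}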
12940 /* Print an integer constant expression in assembler syntax. Addition
12941 and subtraction are the only arithmetic that may appear in these
12942 expressions. FILE is the stdio stream to write to, X is the rtx, and
12943 CODE is the operand print code from the output string. */
12946 output_pic_addr_const (FILE *file, rtx x, int code)
12950 switch (GET_CODE (x))
12953 gcc_assert (flag_pic);
12958 if (TARGET_64BIT || ! TARGET_MACHO_BRANCH_ISLANDS)
12959 output_addr_const (file, x);
12962 const char *name = XSTR (x, 0);
12964 /* Mark the decl as referenced so that cgraph will
12965 output the function. */
12966 if (SYMBOL_REF_DECL (x))
12967 mark_decl_referenced (SYMBOL_REF_DECL (x));
12970 if (MACHOPIC_INDIRECT
12971 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
12972 name = machopic_indirection_name (x, /*stub_p=*/true);
12974 assemble_name (file, name);
12976 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
12977 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
12978 fputs ("@PLT", file);
12985 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
12986 assemble_name (asm_out_file, buf);
12990 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12994 /* This used to output parentheses around the expression,
12995 but that does not work on the 386 (either ATT or BSD assembler). */
12996 output_pic_addr_const (file, XEXP (x, 0), code);
13000 if (GET_MODE (x) == VOIDmode)
13002 /* We can use %d if the number is <32 bits and positive. */
13003 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
13004 fprintf (file, "0x%lx%08lx",
13005 (unsigned long) CONST_DOUBLE_HIGH (x),
13006 (unsigned long) CONST_DOUBLE_LOW (x));
13008 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
13011 /* We can't handle floating point constants;
13012 TARGET_PRINT_OPERAND must handle them. */
13013 output_operand_lossage ("floating constant misused");
13017 /* Some assemblers need integer constants to appear first. */
13018 if (CONST_INT_P (XEXP (x, 0)))
13020 output_pic_addr_const (file, XEXP (x, 0), code);
13022 output_pic_addr_const (file, XEXP (x, 1), code);
13026 gcc_assert (CONST_INT_P (XEXP (x, 1)));
13027 output_pic_addr_const (file, XEXP (x, 1), code);
13029 output_pic_addr_const (file, XEXP (x, 0), code);
13035 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
13036 output_pic_addr_const (file, XEXP (x, 0), code);
13038 output_pic_addr_const (file, XEXP (x, 1), code);
13040 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
13044 if (XINT (x, 1) == UNSPEC_STACK_CHECK)
13046 bool f = i386_asm_output_addr_const_extra (file, x);
13051 gcc_assert (XVECLEN (x, 0) == 1);
13052 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
13053 switch (XINT (x, 1))
13056 fputs ("@GOT", file);
13058 case UNSPEC_GOTOFF:
13059 fputs ("@GOTOFF", file);
13061 case UNSPEC_PLTOFF:
13062 fputs ("@PLTOFF", file);
13065 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13066 "(%rip)" : "[rip]", file);
13068 case UNSPEC_GOTPCREL:
13069 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13070 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
13072 case UNSPEC_GOTTPOFF:
13073 /* FIXME: This might be @TPOFF in Sun ld too. */
13074 fputs ("@gottpoff", file);
13077 fputs ("@tpoff", file);
13079 case UNSPEC_NTPOFF:
13081 fputs ("@tpoff", file);
13083 fputs ("@ntpoff", file);
13085 case UNSPEC_DTPOFF:
13086 fputs ("@dtpoff", file);
13088 case UNSPEC_GOTNTPOFF:
13090 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
13091 "@gottpoff(%rip)": "@gottpoff[rip]", file);
13093 fputs ("@gotntpoff", file);
13095 case UNSPEC_INDNTPOFF:
13096 fputs ("@indntpoff", file);
13099 case UNSPEC_MACHOPIC_OFFSET:
13101 machopic_output_function_base_name (file);
13105 output_operand_lossage ("invalid UNSPEC as operand");
13111 output_operand_lossage ("invalid expression as operand");
13115 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
13116 We need to emit DTP-relative relocations. */
13118 static void ATTRIBUTE_UNUSED
13119 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
13121 fputs (ASM_LONG, file);
13122 output_addr_const (file, x);
13123 fputs ("@dtpoff", file);
13129 fputs (", 0", file);
13132 gcc_unreachable ();
13136 /* Return true if X is a representation of the PIC register. This copes
13137 with calls from ix86_find_base_term, where the register might have
13138 been replaced by a cselib value. */
13141 ix86_pic_register_p (rtx x)
13143 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
13144 return (pic_offset_table_rtx
13145 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
13147 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
13150 /* Helper function for ix86_delegitimize_address.
13151 Attempt to delegitimize TLS local-exec accesses. */
13154 ix86_delegitimize_tls_address (rtx orig_x)
13156 rtx x = orig_x, unspec;
13157 struct ix86_address addr;
13159 if (!TARGET_TLS_DIRECT_SEG_REFS)
13163 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
13165 if (ix86_decompose_address (x, &addr) == 0
13166 || addr.seg != (TARGET_64BIT ? SEG_FS : SEG_GS)
13167 || addr.disp == NULL_RTX
13168 || GET_CODE (addr.disp) != CONST)
13170 unspec = XEXP (addr.disp, 0);
13171 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
13172 unspec = XEXP (unspec, 0);
13173 if (GET_CODE (unspec) != UNSPEC || XINT (unspec, 1) != UNSPEC_NTPOFF)
13175 x = XVECEXP (unspec, 0, 0);
13176 gcc_assert (GET_CODE (x) == SYMBOL_REF);
13177 if (unspec != XEXP (addr.disp, 0))
13178 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.disp, 0), 1));
13181 rtx idx = addr.index;
13182 if (addr.scale != 1)
13183 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
13184 x = gen_rtx_PLUS (Pmode, idx, x);
13187 x = gen_rtx_PLUS (Pmode, addr.base, x);
13188 if (MEM_P (orig_x))
13189 x = replace_equiv_address_nv (orig_x, x);
13193 /* In the name of slightly smaller debug output, and to cater to
13194 general assembler lossage, recognize PIC+GOTOFF and turn it back
13195 into a direct symbol reference.
13197 On Darwin, this is necessary to avoid a crash, because Darwin
13198 has a different PIC label for each routine but the DWARF debugging
13199 information is not associated with any particular routine, so it's
13200 necessary to remove references to the PIC label from RTL stored by
13201 the DWARF output code. */
13204 ix86_delegitimize_address (rtx x)
13206 rtx orig_x = delegitimize_mem_from_attrs (x);
13207 /* addend is NULL or some rtx if x is something+GOTOFF where
13208 something doesn't include the PIC register. */
13209 rtx addend = NULL_RTX;
13210 /* reg_addend is NULL or a multiple of some register. */
13211 rtx reg_addend = NULL_RTX;
13212 /* const_addend is NULL or a const_int. */
13213 rtx const_addend = NULL_RTX;
13214 /* This is the result, or NULL. */
13215 rtx result = NULL_RTX;
13224 if (GET_CODE (x) != CONST
13225 || GET_CODE (XEXP (x, 0)) != UNSPEC
13226 || (XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
13227 && XINT (XEXP (x, 0), 1) != UNSPEC_PCREL)
13228 || !MEM_P (orig_x))
13229 return ix86_delegitimize_tls_address (orig_x);
13230 x = XVECEXP (XEXP (x, 0), 0, 0);
13231 if (GET_MODE (orig_x) != GET_MODE (x))
13233 x = simplify_gen_subreg (GET_MODE (orig_x), x,
13241 if (GET_CODE (x) != PLUS
13242 || GET_CODE (XEXP (x, 1)) != CONST)
13243 return ix86_delegitimize_tls_address (orig_x);
13245 if (ix86_pic_register_p (XEXP (x, 0)))
13246 /* %ebx + GOT/GOTOFF */
13248 else if (GET_CODE (XEXP (x, 0)) == PLUS)
13250 /* %ebx + %reg * scale + GOT/GOTOFF */
13251 reg_addend = XEXP (x, 0);
13252 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
13253 reg_addend = XEXP (reg_addend, 1);
13254 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
13255 reg_addend = XEXP (reg_addend, 0);
13258 reg_addend = NULL_RTX;
13259 addend = XEXP (x, 0);
13263 addend = XEXP (x, 0);
13265 x = XEXP (XEXP (x, 1), 0);
13266 if (GET_CODE (x) == PLUS
13267 && CONST_INT_P (XEXP (x, 1)))
13269 const_addend = XEXP (x, 1);
13273 if (GET_CODE (x) == UNSPEC
13274 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
13275 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
13276 result = XVECEXP (x, 0, 0);
13278 if (TARGET_MACHO && darwin_local_data_pic (x)
13279 && !MEM_P (orig_x))
13280 result = XVECEXP (x, 0, 0);
13283 return ix86_delegitimize_tls_address (orig_x);
13286 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
13288 result = gen_rtx_PLUS (Pmode, reg_addend, result);
13291 /* If the rest of original X doesn't involve the PIC register, add
13292 addend and subtract pic_offset_table_rtx. This can happen e.g.
13294 leal (%ebx, %ecx, 4), %ecx
13296 movl foo@GOTOFF(%ecx), %edx
13297 in which case we return (%ecx - %ebx) + foo. */
13298 if (pic_offset_table_rtx)
13299 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
13300 pic_offset_table_rtx),
13305 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
13307 result = simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
13308 if (result == NULL_RTX)
13314 /* If X is a machine specific address (i.e. a symbol or label being
13315 referenced as a displacement from the GOT implemented using an
13316 UNSPEC), then return the base term. Otherwise return X. */
13319 ix86_find_base_term (rtx x)
13325 if (GET_CODE (x) != CONST)
13327 term = XEXP (x, 0);
13328 if (GET_CODE (term) == PLUS
13329 && (CONST_INT_P (XEXP (term, 1))
13330 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
13331 term = XEXP (term, 0);
13332 if (GET_CODE (term) != UNSPEC
13333 || (XINT (term, 1) != UNSPEC_GOTPCREL
13334 && XINT (term, 1) != UNSPEC_PCREL))
13337 return XVECEXP (term, 0, 0);
13340 return ix86_delegitimize_address (x);
13344 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
13345 int fp, FILE *file)
13347 const char *suffix;
13349 if (mode == CCFPmode || mode == CCFPUmode)
13351 code = ix86_fp_compare_code_to_integer (code);
13355 code = reverse_condition (code);
13406 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
13410 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
13411 Those same assemblers have the same but opposite lossage on cmov. */
13412 if (mode == CCmode)
13413 suffix = fp ? "nbe" : "a";
13414 else if (mode == CCCmode)
13417 gcc_unreachable ();
13433 gcc_unreachable ();
13437 gcc_assert (mode == CCmode || mode == CCCmode);
13454 gcc_unreachable ();
13458 /* ??? As above. */
13459 gcc_assert (mode == CCmode || mode == CCCmode);
13460 suffix = fp ? "nb" : "ae";
13463 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
13467 /* ??? As above. */
13468 if (mode == CCmode)
13470 else if (mode == CCCmode)
13471 suffix = fp ? "nb" : "ae";
13473 gcc_unreachable ();
13476 suffix = fp ? "u" : "p";
13479 suffix = fp ? "nu" : "np";
13482 gcc_unreachable ();
13484 fputs (suffix, file);
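
/* Illustrative sketch, not GCC code: a few of the integer CCmode suffix
   choices made above, as they would appear after "set" or "j".  The
   enum is a stand-in for the rtx comparison codes.  */

enum cc_example { CC_EQ, CC_NE, CC_GTU, CC_GEU };

static const char *
int_cc_suffix_example (enum cc_example code)
{
  switch (code)
    {
    case CC_EQ:  return "e";
    case CC_NE:  return "ne";
    case CC_GTU: return "a";	/* unsigned above */
    case CC_GEU: return "ae";	/* unsigned above or equal */
    }
  return 0;
}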
13487 /* Print the name of register X to FILE based on its machine mode and number.
13488 If CODE is 'w', pretend the mode is HImode.
13489 If CODE is 'b', pretend the mode is QImode.
13490 If CODE is 'k', pretend the mode is SImode.
13491 If CODE is 'q', pretend the mode is DImode.
13492 If CODE is 'x', pretend the mode is V4SFmode.
13493 If CODE is 't', pretend the mode is V8SFmode.
13494 If CODE is 'h', pretend the reg is the 'high' byte register.
13495 If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.
13496 If CODE is 'd', duplicate the operand for AVX instruction.
13500 print_reg (rtx x, int code, FILE *file)
13503 bool duplicated = code == 'd' && TARGET_AVX;
13505 gcc_assert (x == pc_rtx
13506 || (REGNO (x) != ARG_POINTER_REGNUM
13507 && REGNO (x) != FRAME_POINTER_REGNUM
13508 && REGNO (x) != FLAGS_REG
13509 && REGNO (x) != FPSR_REG
13510 && REGNO (x) != FPCR_REG));
13512 if (ASSEMBLER_DIALECT == ASM_ATT)
13517 gcc_assert (TARGET_64BIT);
13518 fputs ("rip", file);
13522 if (code == 'w' || MMX_REG_P (x))
13524 else if (code == 'b')
13526 else if (code == 'k')
13528 else if (code == 'q')
13530 else if (code == 'y')
13532 else if (code == 'h')
13534 else if (code == 'x')
13536 else if (code == 't')
13539 code = GET_MODE_SIZE (GET_MODE (x));
13541 /* Irritatingly, AMD extended registers use a different naming convention
13542 from the normal registers. */
13543 if (REX_INT_REG_P (x))
13545 gcc_assert (TARGET_64BIT);
13549 error ("extended registers have no high halves");
13552 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
13555 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
13558 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
13561 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
13564 error ("unsupported operand size for extended register");
13574 if (STACK_TOP_P (x))
13583 if (! ANY_FP_REG_P (x))
13584 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
13589 reg = hi_reg_name[REGNO (x)];
13592 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
13594 reg = qi_reg_name[REGNO (x)];
13597 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
13599 reg = qi_high_reg_name[REGNO (x)];
13604 gcc_assert (!duplicated);
13606 fputs (hi_reg_name[REGNO (x)] + 1, file);
13611 gcc_unreachable ();
13617 if (ASSEMBLER_DIALECT == ASM_ATT)
13618 fprintf (file, ", %%%s", reg);
13620 fprintf (file, ", %s", reg);
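
/* Illustrative sketch, not GCC code: the size dispatch above for one
   hard register, shown as the AT&T names register 0 takes per operand
   size (codes 'b', 'w', 'k' and 'q' respectively).  */

static const char *
reg_name_by_size_example (int size)
{
  switch (size)
    {
    case 1: return "al";
    case 2: return "ax";
    case 4: return "eax";
    case 8: return "rax";	/* 64-bit only */
    }
  return 0;
}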
13624 /* Locate some local-dynamic symbol still in use by this function
13625 so that we can print its name in some tls_local_dynamic_base
13626 pattern. */
13629 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
13633 if (GET_CODE (x) == SYMBOL_REF
13634 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
13636 cfun->machine->some_ld_name = XSTR (x, 0);
13643 static const char *
13644 get_some_local_dynamic_name (void)
13648 if (cfun->machine->some_ld_name)
13649 return cfun->machine->some_ld_name;
13651 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
13652 if (NONDEBUG_INSN_P (insn)
13653 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
13654 return cfun->machine->some_ld_name;
13659 /* Meaning of CODE:
13660 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
13661 C -- print opcode suffix for set/cmov insn.
13662 c -- like C, but print reversed condition
13663 F,f -- likewise, but for floating-point.
13664 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
13665 otherwise nothing.
13666 R -- print the prefix for register names.
13667 z -- print the opcode suffix for the size of the current operand.
13668 Z -- likewise, with special suffixes for x87 instructions.
13669 * -- print a star (in certain assembler syntax)
13670 A -- print an absolute memory reference.
13671 w -- print the operand as if it's a "word" (HImode) even if it isn't.
13672 s -- print a shift double count, followed by the assembler's argument
13673 delimiter.
13674 b -- print the QImode name of the register for the indicated operand.
13675 %b0 would print %al if operands[0] is reg 0.
13676 w -- likewise, print the HImode name of the register.
13677 k -- likewise, print the SImode name of the register.
13678 q -- likewise, print the DImode name of the register.
13679 x -- likewise, print the V4SFmode name of the register.
13680 t -- likewise, print the V8SFmode name of the register.
13681 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
13682 y -- print "st(0)" instead of "st" as a register.
13683 d -- print duplicated register operand for AVX instruction.
13684 D -- print condition for SSE cmp instruction.
13685 P -- if PIC, print an @PLT suffix.
13686 p -- print raw symbol name.
13687 X -- don't print any sort of PIC '@' suffix for a symbol.
13688 & -- print some in-use local-dynamic symbol name.
13689 H -- print a memory address offset by 8; used for sse high-parts
13690 Y -- print condition for XOP pcom* instruction.
13691 + -- print a branch hint as 'cs' or 'ds' prefix
13692 ; -- print a semicolon (after prefixes due to bug in older gas).
13693 ~ -- print "i" if TARGET_AVX2, "f" otherwise.
13694 @ -- print a segment register of thread base pointer load
13698 ix86_print_operand (FILE *file, rtx x, int code)
13705 if (ASSEMBLER_DIALECT == ASM_ATT)
13711 const char *name = get_some_local_dynamic_name ();
13713 output_operand_lossage ("'%%&' used without any "
13714 "local dynamic TLS references");
13716 assemble_name (file, name);
13721 switch (ASSEMBLER_DIALECT)
13728 /* Intel syntax. For absolute addresses, registers should not
13729 be surrounded by braces. */
13733 ix86_print_operand (file, x, 0);
13740 gcc_unreachable ();
13743 ix86_print_operand (file, x, 0);
13748 if (ASSEMBLER_DIALECT == ASM_ATT)
13753 if (ASSEMBLER_DIALECT == ASM_ATT)
13758 if (ASSEMBLER_DIALECT == ASM_ATT)
13763 if (ASSEMBLER_DIALECT == ASM_ATT)
13768 if (ASSEMBLER_DIALECT == ASM_ATT)
13773 if (ASSEMBLER_DIALECT == ASM_ATT)
13778 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
13780 /* Opcodes don't get size suffixes if using Intel opcodes. */
13781 if (ASSEMBLER_DIALECT == ASM_INTEL)
13784 switch (GET_MODE_SIZE (GET_MODE (x)))
13803 output_operand_lossage
13804 ("invalid operand size for operand code '%c'", code);
13809 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
13811 (0, "non-integer operand used with operand code '%c'", code);
13815 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
13816 if (ASSEMBLER_DIALECT == ASM_INTEL)
13819 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
13821 switch (GET_MODE_SIZE (GET_MODE (x)))
13824 #ifdef HAVE_AS_IX86_FILDS
13834 #ifdef HAVE_AS_IX86_FILDQ
13837 fputs ("ll", file);
13845 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
13847 /* 387 opcodes don't get size suffixes
13848 if the operands are registers. */
13849 if (STACK_REG_P (x))
13852 switch (GET_MODE_SIZE (GET_MODE (x)))
13873 output_operand_lossage
13874 ("invalid operand type used with operand code '%c'", code);
13878 output_operand_lossage
13879 ("invalid operand size for operand code '%c'", code);
13897 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
13899 ix86_print_operand (file, x, 0);
13900 fputs (", ", file);
13905 /* Little bit of braindamage here. The SSE compare instructions
13906 use completely different names for the comparisons than the
13907 fp conditional moves do. */
13910 switch (GET_CODE (x))
13913 fputs ("eq", file);
13916 fputs ("eq_us", file);
13919 fputs ("lt", file);
13922 fputs ("nge", file);
13925 fputs ("le", file);
13928 fputs ("ngt", file);
13931 fputs ("unord", file);
13934 fputs ("neq", file);
13937 fputs ("neq_oq", file);
13940 fputs ("ge", file);
13943 fputs ("nlt", file);
13946 fputs ("gt", file);
13949 fputs ("nle", file);
13952 fputs ("ord", file);
13955 output_operand_lossage ("operand is not a condition code, "
13956 "invalid operand code 'D'");
13962 switch (GET_CODE (x))
13966 fputs ("eq", file);
13970 fputs ("lt", file);
13974 fputs ("le", file);
13977 fputs ("unord", file);
13981 fputs ("neq", file);
13985 fputs ("nlt", file);
13989 fputs ("nle", file);
13992 fputs ("ord", file);
13995 output_operand_lossage ("operand is not a condition code, "
13996 "invalid operand code 'D'");
14002 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14003 if (ASSEMBLER_DIALECT == ASM_ATT)
14005 switch (GET_MODE (x))
14007 case HImode: putc ('w', file); break;
14009 case SFmode: putc ('l', file); break;
14011 case DFmode: putc ('q', file); break;
14012 default: gcc_unreachable ();
14019 if (!COMPARISON_P (x))
14021 output_operand_lossage ("operand is neither a constant nor a "
14022 "condition code, invalid operand code "
14026 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
14029 if (!COMPARISON_P (x))
14031 output_operand_lossage ("operand is neither a constant nor a "
14032 "condition code, invalid operand code "
14036 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14037 if (ASSEMBLER_DIALECT == ASM_ATT)
14040 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
14043 /* Like above, but reverse condition */
14045 /* Check to see if argument to %c is really a constant
14046 and not a condition code which needs to be reversed. */
14047 if (!COMPARISON_P (x))
14049 output_operand_lossage ("operand is neither a constant nor a "
14050 "condition code, invalid operand "
14054 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
14057 if (!COMPARISON_P (x))
14059 output_operand_lossage ("operand is neither a constant nor a "
14060 "condition code, invalid operand "
14064 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
14065 if (ASSEMBLER_DIALECT == ASM_ATT)
14068 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
14072 /* It doesn't actually matter what mode we use here, as we're
14073 only going to use this for printing. */
14074 x = adjust_address_nv (x, DImode, 8);
14081 if (!optimize
14082 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
14085 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
14088 int pred_val = INTVAL (XEXP (x, 0));
14090 if (pred_val < REG_BR_PROB_BASE * 45 / 100
14091 || pred_val > REG_BR_PROB_BASE * 55 / 100)
14093 int taken = pred_val > REG_BR_PROB_BASE / 2;
14094 int cputaken = final_forward_branch_p (current_output_insn) == 0;
14096 /* Emit hints only in the case the default branch prediction
14097 heuristics would fail. */
14098 if (taken != cputaken)
14100 /* We use 3e (DS) prefix for taken branches and
14101 2e (CS) prefix for not taken branches. */
14103 fputs ("ds ; ", file);
14105 fputs ("cs ; ", file);
14113 switch (GET_CODE (x))
14116 fputs ("neq", file);
14119 fputs ("eq", file);
14123 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
14127 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
14131 fputs ("le", file);
14135 fputs ("lt", file);
14138 fputs ("unord", file);
14141 fputs ("ord", file);
14144 fputs ("ueq", file);
14147 fputs ("nlt", file);
14150 fputs ("nle", file);
14153 fputs ("ule", file);
14156 fputs ("ult", file);
14159 fputs ("une", file);
14162 output_operand_lossage ("operand is not a condition code, "
14163 "invalid operand code 'Y'");
14169 #ifndef HAVE_AS_IX86_REP_LOCK_PREFIX
14175 if (ASSEMBLER_DIALECT == ASM_ATT)
14178 /* The kernel uses a different segment register for performance
14179 reasons; a system call would not have to trash the userspace
14180 segment register, which would be expensive. */
14181 if (TARGET_64BIT && ix86_cmodel != CM_KERNEL)
14182 fputs ("fs", file);
14184 fputs ("gs", file);
14188 putc (TARGET_AVX2 ? 'i' : 'f', file);
14192 output_operand_lossage ("invalid operand code '%c'", code);
14197 print_reg (x, code, file);
14199 else if (MEM_P (x))
14201 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
14202 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
14203 && GET_MODE (x) != BLKmode)
14206 switch (GET_MODE_SIZE (GET_MODE (x)))
14208 case 1: size = "BYTE"; break;
14209 case 2: size = "WORD"; break;
14210 case 4: size = "DWORD"; break;
14211 case 8: size = "QWORD"; break;
14212 case 12: size = "TBYTE"; break;
14213 case 16:
14214 if (GET_MODE (x) == XFmode)
14215 size = "TBYTE";
14216 else
14217 size = "XMMWORD";
14218 break;
14219 case 32: size = "YMMWORD"; break;
14221 gcc_unreachable ();
14224 /* Check for explicit size override (codes 'b', 'w', 'k',
14228 else if (code == 'w')
14230 else if (code == 'k')
14232 else if (code == 'q')
14234 else if (code == 'x')
14237 fputs (size, file);
14238 fputs (" PTR ", file);
14242 /* Avoid (%rip) for call operands. */
14243 if (CONSTANT_ADDRESS_P (x) && code == 'P'
14244 && !CONST_INT_P (x))
14245 output_addr_const (file, x);
14246 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
14247 output_operand_lossage ("invalid constraints for operand");
14249 output_address (x);
14252 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
14257 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14258 REAL_VALUE_TO_TARGET_SINGLE (r, l);
14260 if (ASSEMBLER_DIALECT == ASM_ATT)
14262 /* Sign extend 32bit SFmode immediate to 8 bytes. */
14264 fprintf (file, "0x%08llx", (unsigned long long) (int) l);
14266 fprintf (file, "0x%08x", (unsigned int) l);
14269 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
14274 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14275 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
14277 if (ASSEMBLER_DIALECT == ASM_ATT)
14279 fprintf (file, "0x%lx%08lx", l[1] & 0xffffffff, l[0] & 0xffffffff);
14282 /* These float cases don't actually occur as immediate operands. */
14283 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
14287 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
14288 fputs (dstr, file);
14293 /* We have patterns that allow zero sets of memory, for instance.
14294 In 64-bit mode, we should probably support all 8-byte vectors,
14295 since we can in fact encode that into an immediate. */
14296 if (GET_CODE (x) == CONST_VECTOR)
14298 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
14302 if (code != 'P' && code != 'p')
14304 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
14306 if (ASSEMBLER_DIALECT == ASM_ATT)
14309 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
14310 || GET_CODE (x) == LABEL_REF)
14312 if (ASSEMBLER_DIALECT == ASM_ATT)
14315 fputs ("OFFSET FLAT:", file);
14318 if (CONST_INT_P (x))
14319 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
14320 else if (flag_pic || MACHOPIC_INDIRECT)
14321 output_pic_addr_const (file, x, code);
14323 output_addr_const (file, x);
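
/* Standalone sketch, not GCC code, of the '+' branch-hint decision
   above.  REG_BR_PROB_BASE is mirrored as 10000; FORWARD_BRANCH stands
   in for final_forward_branch_p.  A prefix is emitted only when the
   predicted direction disagrees with the static forward-not-taken
   heuristic.  */

static const char *
branch_hint_example (int pred_val, int forward_branch)
{
  int base = 10000;		/* REG_BR_PROB_BASE stand-in */
  int taken, cputaken;

  if (pred_val >= base * 45 / 100 && pred_val <= base * 55 / 100)
    return "";			/* prediction too weak to be worth a hint */

  taken = pred_val > base / 2;
  cputaken = !forward_branch;	/* CPU statically predicts backward taken */

  if (taken != cputaken)
    return taken ? "ds ; " : "cs ; ";
  return "";
}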
14328 ix86_print_operand_punct_valid_p (unsigned char code)
14330 return (code == '@' || code == '*' || code == '+'
14331 || code == '&' || code == ';' || code == '~');
14334 /* Print a memory operand whose address is ADDR. */
14337 ix86_print_operand_address (FILE *file, rtx addr)
14339 struct ix86_address parts;
14340 rtx base, index, disp;
14345 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_VSIBADDR)
14347 ok = ix86_decompose_address (XVECEXP (addr, 0, 0), &parts);
14348 gcc_assert (parts.index == NULL_RTX);
14349 parts.index = XVECEXP (addr, 0, 1);
14350 parts.scale = INTVAL (XVECEXP (addr, 0, 2));
14351 addr = XVECEXP (addr, 0, 0);
14355 ok = ix86_decompose_address (addr, &parts);
14359 if (parts.base && GET_CODE (parts.base) == SUBREG)
14361 rtx tmp = SUBREG_REG (parts.base);
14362 parts.base = simplify_subreg (GET_MODE (parts.base),
14363 tmp, GET_MODE (tmp), 0);
14366 if (parts.index && GET_CODE (parts.index) == SUBREG)
14368 rtx tmp = SUBREG_REG (parts.index);
14369 parts.index = simplify_subreg (GET_MODE (parts.index),
14370 tmp, GET_MODE (tmp), 0);
14374 index = parts.index;
14376 scale = parts.scale;
14384 if (ASSEMBLER_DIALECT == ASM_ATT)
14386 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
14389 gcc_unreachable ();
14392 /* Use one byte shorter RIP relative addressing for 64bit mode. */
14393 if (TARGET_64BIT && !base && !index)
14397 if (GET_CODE (disp) == CONST
14398 && GET_CODE (XEXP (disp, 0)) == PLUS
14399 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14400 symbol = XEXP (XEXP (disp, 0), 0);
14402 if (GET_CODE (symbol) == LABEL_REF
14403 || (GET_CODE (symbol) == SYMBOL_REF
14404 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
14407 if (!base && !index)
14409 /* Displacement only requires special attention. */
14411 if (CONST_INT_P (disp))
14413 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
14414 fputs ("ds:", file);
14415 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
14418 output_pic_addr_const (file, disp, 0);
14420 output_addr_const (file, disp);
14426 /* Print SImode registers for zero-extended addresses to force
14427 addr32 prefix. Otherwise print DImode registers to avoid it. */
14429 code = ((GET_CODE (addr) == ZERO_EXTEND
14430 || GET_CODE (addr) == AND)
14434 if (ASSEMBLER_DIALECT == ASM_ATT)
14439 output_pic_addr_const (file, disp, 0);
14440 else if (GET_CODE (disp) == LABEL_REF)
14441 output_asm_label (disp);
14443 output_addr_const (file, disp);
14448 print_reg (base, code, file);
14452 print_reg (index, vsib ? 0 : code, file);
14453 if (scale != 1 || vsib)
14454 fprintf (file, ",%d", scale);
14460 rtx offset = NULL_RTX;
14464 /* Pull out the offset of a symbol; print any symbol itself. */
14465 if (GET_CODE (disp) == CONST
14466 && GET_CODE (XEXP (disp, 0)) == PLUS
14467 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14469 offset = XEXP (XEXP (disp, 0), 1);
14470 disp = gen_rtx_CONST (VOIDmode,
14471 XEXP (XEXP (disp, 0), 0));
14475 output_pic_addr_const (file, disp, 0);
14476 else if (GET_CODE (disp) == LABEL_REF)
14477 output_asm_label (disp);
14478 else if (CONST_INT_P (disp))
14481 output_addr_const (file, disp);
14487 print_reg (base, code, file);
14490 if (INTVAL (offset) >= 0)
14492 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14496 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14503 print_reg (index, vsib ? 0 : code, file);
14504 if (scale != 1 || vsib)
14505 fprintf (file, "*%d", scale);
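
/* Illustrative sketch, not GCC code: the general address layout the
   printer above produces in each dialect for base + index*scale + disp.  */

static const char *
addr_syntax_example (int intel_dialect)
{
  return intel_dialect
	 ? "[base+index*scale+disp]"	/* ASM_INTEL */
	 : "disp(%base,%index,scale)";	/* ASM_ATT */
}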
14512 /* Implementation of TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
14515 i386_asm_output_addr_const_extra (FILE *file, rtx x)
14519 if (GET_CODE (x) != UNSPEC)
14522 op = XVECEXP (x, 0, 0);
14523 switch (XINT (x, 1))
14525 case UNSPEC_GOTTPOFF:
14526 output_addr_const (file, op);
14527 /* FIXME: This might be @TPOFF in Sun ld. */
14528 fputs ("@gottpoff", file);
14531 output_addr_const (file, op);
14532 fputs ("@tpoff", file);
14534 case UNSPEC_NTPOFF:
14535 output_addr_const (file, op);
14537 fputs ("@tpoff", file);
14539 fputs ("@ntpoff", file);
14541 case UNSPEC_DTPOFF:
14542 output_addr_const (file, op);
14543 fputs ("@dtpoff", file);
14545 case UNSPEC_GOTNTPOFF:
14546 output_addr_const (file, op);
14548 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
14549 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
14551 fputs ("@gotntpoff", file);
14553 case UNSPEC_INDNTPOFF:
14554 output_addr_const (file, op);
14555 fputs ("@indntpoff", file);
14558 case UNSPEC_MACHOPIC_OFFSET:
14559 output_addr_const (file, op);
14561 machopic_output_function_base_name (file);
14565 case UNSPEC_STACK_CHECK:
14569 gcc_assert (flag_split_stack);
14571 #ifdef TARGET_THREAD_SPLIT_STACK_OFFSET
14572 offset = TARGET_THREAD_SPLIT_STACK_OFFSET;
14574 gcc_unreachable ();
14577 fprintf (file, "%s:%d", TARGET_64BIT ? "%fs" : "%gs", offset);
14588 /* Split one or more double-mode RTL references into pairs of half-mode
14589 references. The RTL can be REG, offsettable MEM, integer constant, or
14590 CONST_DOUBLE. "operands" is a pointer to an array of double-mode RTLs to
14591 split and "num" is its length. lo_half and hi_half are output arrays
14592 that parallel "operands". */
14595 split_double_mode (enum machine_mode mode, rtx operands[],
14596 int num, rtx lo_half[], rtx hi_half[])
14598 enum machine_mode half_mode;
14604 half_mode = DImode;
14607 half_mode = SImode;
14610 gcc_unreachable ();
14613 byte = GET_MODE_SIZE (half_mode);
14617 rtx op = operands[num];
14619 /* simplify_subreg refuses to split volatile memory addresses,
14620 but we still have to handle it. */
14623 lo_half[num] = adjust_address (op, half_mode, 0);
14624 hi_half[num] = adjust_address (op, half_mode, byte);
14628 lo_half[num] = simplify_gen_subreg (half_mode, op,
14629 GET_MODE (op) == VOIDmode
14630 ? mode : GET_MODE (op), 0);
14631 hi_half[num] = simplify_gen_subreg (half_mode, op,
14632 GET_MODE (op) == VOIDmode
14633 ? mode : GET_MODE (op), byte);
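
/* Standalone sketch, not GCC code: the arithmetic split_double_mode
   performs for a DImode constant on this little-endian target.  The low
   half sits at byte offset 0 and the high half at byte offset 4.  */

static void
split_di_constant_example (unsigned long long value,
			   unsigned int *lo, unsigned int *hi)
{
  *lo = (unsigned int) (value & 0xffffffffu);
  *hi = (unsigned int) (value >> 32);
}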
14638 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
14639 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
14640 is the expression of the binary operation. The output may either be
14641 emitted here, or returned to the caller, like all output_* functions.
14643 There is no guarantee that the operands are the same mode, as they
14644 might be within FLOAT or FLOAT_EXTEND expressions. */
14646 #ifndef SYSV386_COMPAT
14647 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
14648 wants to fix the assemblers because that causes incompatibility
14649 with gcc. No-one wants to fix gcc because that causes
14650 incompatibility with assemblers... You can use the option of
14651 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
14652 #define SYSV386_COMPAT 1
14656 output_387_binary_op (rtx insn, rtx *operands)
14658 static char buf[40];
14661 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
14663 #ifdef ENABLE_CHECKING
14664 /* Even if we do not want to check the inputs, this documents the
14665 input constraints, which helps in understanding the following code. */
14666 if (STACK_REG_P (operands[0])
14667 && ((REG_P (operands[1])
14668 && REGNO (operands[0]) == REGNO (operands[1])
14669 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
14670 || (REG_P (operands[2])
14671 && REGNO (operands[0]) == REGNO (operands[2])
14672 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
14673 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
14676 gcc_assert (is_sse);
14679 switch (GET_CODE (operands[3]))
14682 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14683 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14691 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14692 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14700 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14701 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14709 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14710 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14718 gcc_unreachable ();
14725 strcpy (buf, ssep);
14726 if (GET_MODE (operands[0]) == SFmode)
14727 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
14729 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
14733 strcpy (buf, ssep + 1);
14734 if (GET_MODE (operands[0]) == SFmode)
14735 strcat (buf, "ss\t{%2, %0|%0, %2}");
14737 strcat (buf, "sd\t{%2, %0|%0, %2}");
14743 switch (GET_CODE (operands[3]))
14747 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
14749 rtx temp = operands[2];
14750 operands[2] = operands[1];
14751 operands[1] = temp;
14754 /* We now know operands[0] == operands[1]. */
14756 if (MEM_P (operands[2]))
14762 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14764 if (STACK_TOP_P (operands[0]))
14765 /* How is it that we are storing to a dead operand[2]?
14766 Well, presumably operands[1] is dead too. We can't
14767 store the result to st(0) as st(0) gets popped on this
14768 instruction. Instead store to operands[2] (which I
14769 think has to be st(1)). st(1) will be popped later.
14770 gcc <= 2.8.1 didn't have this check and generated
14771 assembly code that the Unixware assembler rejected. */
14772 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14774 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14778 if (STACK_TOP_P (operands[0]))
14779 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14781 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
14786 if (MEM_P (operands[1]))
14792 if (MEM_P (operands[2]))
14798 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14801 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
14802 derived assemblers, confusingly reverse the direction of
14803 the operation for fsub{r} and fdiv{r} when the
14804 destination register is not st(0). The Intel assembler
14805 doesn't have this brain damage. Read !SYSV386_COMPAT to
14806 figure out what the hardware really does. */
14807 if (STACK_TOP_P (operands[0]))
14808 p = "{p\t%0, %2|rp\t%2, %0}";
14810 p = "{rp\t%2, %0|p\t%0, %2}";
14812 if (STACK_TOP_P (operands[0]))
14813 /* As above for fmul/fadd, we can't store to st(0). */
14814 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14816 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14821 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
14824 if (STACK_TOP_P (operands[0]))
14825 p = "{rp\t%0, %1|p\t%1, %0}";
14827 p = "{p\t%1, %0|rp\t%0, %1}";
14829 if (STACK_TOP_P (operands[0]))
14830 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
14832 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
14837 if (STACK_TOP_P (operands[0]))
14839 if (STACK_TOP_P (operands[1]))
14840 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14842 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
14845 else if (STACK_TOP_P (operands[1]))
14848 p = "{\t%1, %0|r\t%0, %1}";
14850 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
14856 p = "{r\t%2, %0|\t%0, %2}";
14858 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
14864 gcc_unreachable ();
14871 /* Return needed mode for entity in optimize_mode_switching pass. */
14874 ix86_mode_needed (int entity, rtx insn)
14876 enum attr_i387_cw mode;
14878 /* The mode UNINITIALIZED is used to store the control word after a
14879 function call or ASM pattern. The mode ANY specifies that the
14880 function has no requirements on the control word and makes no
14881 changes in the bits we are interested in. */
14884 || (NONJUMP_INSN_P (insn)
14885 && (asm_noperands (PATTERN (insn)) >= 0
14886 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
14887 return I387_CW_UNINITIALIZED;
14889 if (recog_memoized (insn) < 0)
14890 return I387_CW_ANY;
14892 mode = get_attr_i387_cw (insn);
14897 if (mode == I387_CW_TRUNC)
14902 if (mode == I387_CW_FLOOR)
14907 if (mode == I387_CW_CEIL)
14912 if (mode == I387_CW_MASK_PM)
14917 gcc_unreachable ();
14920 return I387_CW_ANY;
14923 /* Output code to initialize control word copies used by trunc?f?i and
14924 rounding patterns. CURRENT_MODE is set to the current control word,
14925 while NEW_MODE is set to the new control word. */
14928 emit_i387_cw_initialization (int mode)
14930 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
14933 enum ix86_stack_slot slot;
14935 rtx reg = gen_reg_rtx (HImode);
14937 emit_insn (gen_x86_fnstcw_1 (stored_mode));
14938 emit_move_insn (reg, copy_rtx (stored_mode));
14940 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
14941 || optimize_function_for_size_p (cfun))
14945 case I387_CW_TRUNC:
14946 /* round toward zero (truncate) */
14947 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
14948 slot = SLOT_CW_TRUNC;
14951 case I387_CW_FLOOR:
14952 /* round down toward -oo */
14953 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
14954 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
14955 slot = SLOT_CW_FLOOR;
14959 /* round up toward +oo */
14960 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
14961 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
14962 slot = SLOT_CW_CEIL;
14965 case I387_CW_MASK_PM:
14966 /* mask precision exception for nearbyint() */
14967 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
14968 slot = SLOT_CW_MASK_PM;
14972 gcc_unreachable ();
14979 case I387_CW_TRUNC:
14980 /* round toward zero (truncate) */
14981 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
14982 slot = SLOT_CW_TRUNC;
14985 case I387_CW_FLOOR:
14986 /* round down toward -oo */
14987 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
14988 slot = SLOT_CW_FLOOR;
14992 /* round up toward +oo */
14993 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
14994 slot = SLOT_CW_CEIL;
14997 case I387_CW_MASK_PM:
14998 /* mask precision exception for nearbyint() */
14999 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
15000 slot = SLOT_CW_MASK_PM;
15004 gcc_unreachable ();
15008 gcc_assert (slot < MAX_386_STACK_LOCALS);
15010 new_mode = assign_386_stack_local (HImode, slot);
15011 emit_move_insn (new_mode, reg);
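
/* Standalone sketch, not GCC code, of the control word edits emitted
   above.  Bits 10-11 of the x87 control word select the rounding mode;
   bit 5 masks the precision exception.  The mode numbering here is
   hypothetical.  */

static unsigned short
i387_cw_example (unsigned short cw, int mode)
{
  switch (mode)
    {
    case 0: return cw | 0x0c00;			/* trunc: round toward zero */
    case 1: return (cw & ~0x0c00) | 0x0400;	/* floor: round down */
    case 2: return (cw & ~0x0c00) | 0x0800;	/* ceil: round up */
    case 3: return cw | 0x0020;			/* mask precision exception */
    }
  return cw;
}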
15014 /* Output code for INSN to convert a float to a signed int. OPERANDS
15015 are the insn operands. The output may be [HSD]Imode and the input
15016 operand may be [SDX]Fmode. */
15019 output_fix_trunc (rtx insn, rtx *operands, bool fisttp)
15021 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15022 int dimode_p = GET_MODE (operands[0]) == DImode;
15023 int round_mode = get_attr_i387_cw (insn);
15025 /* Jump through a hoop or two for DImode, since the hardware has no
15026 non-popping instruction. We used to do this a different way, but
15027 that was somewhat fragile and broke with post-reload splitters. */
15028 if ((dimode_p || fisttp) && !stack_top_dies)
15029 output_asm_insn ("fld\t%y1", operands);
15031 gcc_assert (STACK_TOP_P (operands[1]));
15032 gcc_assert (MEM_P (operands[0]));
15033 gcc_assert (GET_MODE (operands[1]) != TFmode);
15036 output_asm_insn ("fisttp%Z0\t%0", operands);
15039 if (round_mode != I387_CW_ANY)
15040 output_asm_insn ("fldcw\t%3", operands);
15041 if (stack_top_dies || dimode_p)
15042 output_asm_insn ("fistp%Z0\t%0", operands);
15044 output_asm_insn ("fist%Z0\t%0", operands);
15045 if (round_mode != I387_CW_ANY)
15046 output_asm_insn ("fldcw\t%2", operands);
15052 /* Output code for x87 ffreep insn. The OPNO argument, which may only
15053 have the values zero or one, indicates the ffreep insn's operand
15054 from the OPERANDS array. */
15056 static const char *
15057 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
15059 if (TARGET_USE_FFREEP)
15060 #ifdef HAVE_AS_IX86_FFREEP
15061 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
15064 static char retval[32];
15065 int regno = REGNO (operands[opno]);
15067 gcc_assert (FP_REGNO_P (regno));
15069 regno -= FIRST_STACK_REG;
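/* ffreep %st(n) encodes as the two bytes 0xdf 0xc0+n; emitted as a
   little-endian 16-bit word that is 0xc0df + (n << 8), hence the
   "0xc%ddf" format string below.  */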
15071 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
15076 return opno ? "fstp\t%y1" : "fstp\t%y0";
15080 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
15081 should be used. UNORDERED_P is true when fucom should be used. */
15084 output_fp_compare (rtx insn, rtx *operands, bool eflags_p, bool unordered_p)
15086 int stack_top_dies;
15087 rtx cmp_op0, cmp_op1;
15088 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
15092 cmp_op0 = operands[0];
15093 cmp_op1 = operands[1];
15097 cmp_op0 = operands[1];
15098 cmp_op1 = operands[2];
15103 if (GET_MODE (operands[0]) == SFmode)
15105 return "%vucomiss\t{%1, %0|%0, %1}";
15107 return "%vcomiss\t{%1, %0|%0, %1}";
15110 return "%vucomisd\t{%1, %0|%0, %1}";
15112 return "%vcomisd\t{%1, %0|%0, %1}";
15115 gcc_assert (STACK_TOP_P (cmp_op0));
15117 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
15119 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
15121 if (stack_top_dies)
15123 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
15124 return output_387_ffreep (operands, 1);
15127 return "ftst\n\tfnstsw\t%0";
15130 if (STACK_REG_P (cmp_op1)
15132 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
15133 && REGNO (cmp_op1) != FIRST_STACK_REG)
15135 /* If the top of the 387 stack dies, and the other operand
15136 is also a stack register that dies, then this must be an
15137 `fcompp' float compare. */
15141 /* There is no double popping fcomi variant. Fortunately,
15142 eflags is immune from the fstp's cc clobbering. */
15144 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
15146 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
15147 return output_387_ffreep (operands, 0);
15152 return "fucompp\n\tfnstsw\t%0";
15154 return "fcompp\n\tfnstsw\t%0";
15159 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
15161 static const char * const alt[16] =
15163 "fcom%Z2\t%y2\n\tfnstsw\t%0",
15164 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
15165 "fucom%Z2\t%y2\n\tfnstsw\t%0",
15166 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
15168 "ficom%Z2\t%y2\n\tfnstsw\t%0",
15169 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
15173 "fcomi\t{%y1, %0|%0, %y1}",
15174 "fcomip\t{%y1, %0|%0, %y1}",
15175 "fucomi\t{%y1, %0|%0, %y1}",
15176 "fucomip\t{%y1, %0|%0, %y1}",
15187 mask = eflags_p << 3;
15188 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
15189 mask |= unordered_p << 1;
15190 mask |= stack_top_dies;
15192 gcc_assert (mask < 16);
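/* For example, an ordered fcomi compare of FP operands where the stack
   top dies gives mask = 8 | 0 | 0 | 1 = 9, selecting "fcomip" above.  */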
15201 ix86_output_addr_vec_elt (FILE *file, int value)
15203 const char *directive = ASM_LONG;
15207 directive = ASM_QUAD;
15209 gcc_assert (!TARGET_64BIT);
15212 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
15216 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
15218 const char *directive = ASM_LONG;
15221 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
15222 directive = ASM_QUAD;
15224 gcc_assert (!TARGET_64BIT);
15226 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
15227 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
15228 fprintf (file, "%s%s%d-%s%d\n",
15229 directive, LPREFIX, value, LPREFIX, rel);
15230 else if (HAVE_AS_GOTOFF_IN_DATA)
15231 fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
15233 else if (TARGET_MACHO)
15235 fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
15236 machopic_output_function_base_name (file);
15241 asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
15242 GOT_SYMBOL_NAME, LPREFIX, value);
15245 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
15246 to clear register DEST. */
15249 ix86_expand_clear (rtx dest)
15253 /* We play register width games, which are only valid after reload. */
15254 gcc_assert (reload_completed);
15256 /* Avoid HImode and its attendant prefix byte. */
15257 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
15258 dest = gen_rtx_REG (SImode, REGNO (dest));
15259 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
15261 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
15262 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
15264 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15265 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
15271 /* X is an unchanging MEM. If it is a constant pool reference, return
15272 the constant pool rtx, else NULL. */
15275 maybe_get_pool_constant (rtx x)
15277 x = ix86_delegitimize_address (XEXP (x, 0));
15279 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
15280 return get_pool_constant (x);
15286 ix86_expand_move (enum machine_mode mode, rtx operands[])
15289 enum tls_model model;
15294 if (GET_CODE (op1) == SYMBOL_REF)
15296 model = SYMBOL_REF_TLS_MODEL (op1);
15299 op1 = legitimize_tls_address (op1, model, true);
15300 op1 = force_operand (op1, op0);
15303 if (GET_MODE (op1) != mode)
15304 op1 = convert_to_mode (mode, op1, 1);
15306 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15307 && SYMBOL_REF_DLLIMPORT_P (op1))
15308 op1 = legitimize_dllimport_symbol (op1, false);
15310 else if (GET_CODE (op1) == CONST
15311 && GET_CODE (XEXP (op1, 0)) == PLUS
15312 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
15314 rtx addend = XEXP (XEXP (op1, 0), 1);
15315 rtx symbol = XEXP (XEXP (op1, 0), 0);
15318 model = SYMBOL_REF_TLS_MODEL (symbol);
15320 tmp = legitimize_tls_address (symbol, model, true);
15321 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15322 && SYMBOL_REF_DLLIMPORT_P (symbol))
15323 tmp = legitimize_dllimport_symbol (symbol, true);
15327 tmp = force_operand (tmp, NULL);
15328 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
15329 op0, 1, OPTAB_DIRECT);
15332 if (GET_MODE (tmp) != mode)
15333 op1 = convert_to_mode (mode, tmp, 1);
15337 if ((flag_pic || MACHOPIC_INDIRECT)
15338 && symbolic_operand (op1, mode))
15340 if (TARGET_MACHO && !TARGET_64BIT)
15343 /* dynamic-no-pic */
15344 if (MACHOPIC_INDIRECT)
15346 rtx temp = ((reload_in_progress
15347 || ((op0 && REG_P (op0))
15349 ? op0 : gen_reg_rtx (Pmode));
15350 op1 = machopic_indirect_data_reference (op1, temp);
15352 op1 = machopic_legitimize_pic_address (op1, mode,
15353 temp == op1 ? 0 : temp);
15355 if (op0 != op1 && GET_CODE (op0) != MEM)
15357 rtx insn = gen_rtx_SET (VOIDmode, op0, op1);
15361 if (GET_CODE (op0) == MEM)
15362 op1 = force_reg (Pmode, op1);
15366 if (GET_CODE (temp) != REG)
15367 temp = gen_reg_rtx (Pmode);
15368 temp = legitimize_pic_address (op1, temp);
15373 /* dynamic-no-pic */
15379 op1 = force_reg (mode, op1);
15380 else if (!(TARGET_64BIT && x86_64_movabs_operand (op1, DImode)))
15382 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
15383 op1 = legitimize_pic_address (op1, reg);
15386 if (GET_MODE (op1) != mode)
15387 op1 = convert_to_mode (mode, op1, 1);
15394 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
15395 || !push_operand (op0, mode))
15397 op1 = force_reg (mode, op1);
15399 if (push_operand (op0, mode)
15400 && ! general_no_elim_operand (op1, mode))
15401 op1 = copy_to_mode_reg (mode, op1);
15403 /* Force large constants in 64bit compilation into a register
15404 to get them CSEed. */
15405 if (can_create_pseudo_p ()
15406 && (mode == DImode) && TARGET_64BIT
15407 && immediate_operand (op1, mode)
15408 && !x86_64_zext_immediate_operand (op1, VOIDmode)
15409 && !register_operand (op0, mode)
15411 op1 = copy_to_mode_reg (mode, op1);
15413 if (can_create_pseudo_p ()
15414 && FLOAT_MODE_P (mode)
15415 && GET_CODE (op1) == CONST_DOUBLE)
15417 /* If we are loading a floating point constant to a register,
15418 force the value to memory now, since we'll get better code
15419 out of the back end. */
15421 op1 = validize_mem (force_const_mem (mode, op1));
15422 if (!register_operand (op0, mode))
15424 rtx temp = gen_reg_rtx (mode);
15425 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
15426 emit_move_insn (op0, temp);
15432 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15436 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
15438 rtx op0 = operands[0], op1 = operands[1];
15439 unsigned int align = GET_MODE_ALIGNMENT (mode);
15441 /* Force constants other than zero into memory. We do not know how
15442 the instructions used to build constants modify the upper 64 bits
15443 of the register; once we have that information we may be able
15444 to handle some of them more efficiently. */
15445 if (can_create_pseudo_p ()
15446 && register_operand (op0, mode)
15447 && (CONSTANT_P (op1)
15448 || (GET_CODE (op1) == SUBREG
15449 && CONSTANT_P (SUBREG_REG (op1))))
15450 && !standard_sse_constant_p (op1))
15451 op1 = validize_mem (force_const_mem (mode, op1));
15453 /* We need to check memory alignment for SSE modes since an attribute
15454 can make operands unaligned. */
15455 if (can_create_pseudo_p ()
15456 && SSE_REG_MODE_P (mode)
15457 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
15458 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
15462 /* ix86_expand_vector_move_misalign() does not like constants ... */
15463 if (CONSTANT_P (op1)
15464 || (GET_CODE (op1) == SUBREG
15465 && CONSTANT_P (SUBREG_REG (op1))))
15466 op1 = validize_mem (force_const_mem (mode, op1));
15468 /* ... nor both arguments in memory. */
15469 if (!register_operand (op0, mode)
15470 && !register_operand (op1, mode))
15471 op1 = force_reg (mode, op1);
15473 tmp[0] = op0; tmp[1] = op1;
15474 ix86_expand_vector_move_misalign (mode, tmp);
15478 /* Make operand1 a register if it isn't already. */
15479 if (can_create_pseudo_p ()
15480 && !register_operand (op0, mode)
15481 && !register_operand (op1, mode))
15483 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
15487 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15490 /* Split 32-byte AVX unaligned load and store if needed. */
15493 ix86_avx256_split_vector_move_misalign (rtx op0, rtx op1)
15496 rtx (*extract) (rtx, rtx, rtx);
15497 rtx (*move_unaligned) (rtx, rtx);
15498 enum machine_mode mode;
15500 switch (GET_MODE (op0))
15503 gcc_unreachable ();
15505 extract = gen_avx_vextractf128v32qi;
15506 move_unaligned = gen_avx_movdqu256;
15510 extract = gen_avx_vextractf128v8sf;
15511 move_unaligned = gen_avx_movups256;
15515 extract = gen_avx_vextractf128v4df;
15516 move_unaligned = gen_avx_movupd256;
15521 if (MEM_P (op1) && TARGET_AVX256_SPLIT_UNALIGNED_LOAD)
15523 rtx r = gen_reg_rtx (mode);
15524 m = adjust_address (op1, mode, 0);
15525 emit_move_insn (r, m);
15526 m = adjust_address (op1, mode, 16);
15527 r = gen_rtx_VEC_CONCAT (GET_MODE (op0), r, m);
15528 emit_move_insn (op0, r);
15530 else if (MEM_P (op0) && TARGET_AVX256_SPLIT_UNALIGNED_STORE)
15532 m = adjust_address (op0, mode, 0);
15533 emit_insn (extract (m, op1, const0_rtx));
15534 m = adjust_address (op0, mode, 16);
15535 emit_insn (extract (m, op1, const1_rtx));
15538 emit_insn (move_unaligned (op0, op1));
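/* For illustration only: the load split above expressed with AVX
   intrinsics (a sketch, not what GCC emits internally).  A 32-byte
   unaligned load becomes two 16-byte loads plus a 128-bit insert.  */
#if 0
#include <immintrin.h>

static __m256
avx256_load_split (const float *p)
{
  __m128 lo = _mm_loadu_ps (p);      /* bytes 0..15 of the source */
  __m128 hi = _mm_loadu_ps (p + 4);  /* bytes 16..31 of the source */
  return _mm256_insertf128_ps (_mm256_castps128_ps256 (lo), hi, 1);
}
#endif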
15541 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
15542 straight to ix86_expand_vector_move. */
15543 /* Code generation for scalar reg-reg moves of single and double precision data:
15544 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
15548 if (x86_sse_partial_reg_dependency == true)
15553 Code generation for scalar loads of double precision data:
15554 if (x86_sse_split_regs == true)
15555 movlpd mem, reg (gas syntax)
15559 Code generation for unaligned packed loads of single precision data
15560 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
15561 if (x86_sse_unaligned_move_optimal)
15564 if (x86_sse_partial_reg_dependency == true)
15576 Code generation for unaligned packed loads of double precision data
15577 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
15578 if (x86_sse_unaligned_move_optimal)
15581 if (x86_sse_split_regs == true)
15594 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
15603 switch (GET_MODE_CLASS (mode))
15605 case MODE_VECTOR_INT:
15607 switch (GET_MODE_SIZE (mode))
15610 /* If we're optimizing for size, movups is the smallest. */
15611 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15613 op0 = gen_lowpart (V4SFmode, op0);
15614 op1 = gen_lowpart (V4SFmode, op1);
15615 emit_insn (gen_sse_movups (op0, op1));
15618 op0 = gen_lowpart (V16QImode, op0);
15619 op1 = gen_lowpart (V16QImode, op1);
15620 emit_insn (gen_sse2_movdqu (op0, op1));
15623 op0 = gen_lowpart (V32QImode, op0);
15624 op1 = gen_lowpart (V32QImode, op1);
15625 ix86_avx256_split_vector_move_misalign (op0, op1);
15628 gcc_unreachable ();
15631 case MODE_VECTOR_FLOAT:
15632 op0 = gen_lowpart (mode, op0);
15633 op1 = gen_lowpart (mode, op1);
15638 emit_insn (gen_sse_movups (op0, op1));
15641 ix86_avx256_split_vector_move_misalign (op0, op1);
15644 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15646 op0 = gen_lowpart (V4SFmode, op0);
15647 op1 = gen_lowpart (V4SFmode, op1);
15648 emit_insn (gen_sse_movups (op0, op1));
15651 emit_insn (gen_sse2_movupd (op0, op1));
15654 ix86_avx256_split_vector_move_misalign (op0, op1);
15657 gcc_unreachable ();
15662 gcc_unreachable ();
15670 /* If we're optimizing for size, movups is the smallest. */
15671 if (optimize_insn_for_size_p ()
15672 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15674 op0 = gen_lowpart (V4SFmode, op0);
15675 op1 = gen_lowpart (V4SFmode, op1);
15676 emit_insn (gen_sse_movups (op0, op1));
15680 /* ??? If we have typed data, then it would appear that using
15681 movdqu is the only way to get unaligned data loaded with
15682 integer type instructions. */
15683 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15685 op0 = gen_lowpart (V16QImode, op0);
15686 op1 = gen_lowpart (V16QImode, op1);
15687 emit_insn (gen_sse2_movdqu (op0, op1));
15691 if (TARGET_SSE2 && mode == V2DFmode)
15695 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15697 op0 = gen_lowpart (V2DFmode, op0);
15698 op1 = gen_lowpart (V2DFmode, op1);
15699 emit_insn (gen_sse2_movupd (op0, op1));
15703 /* When SSE registers are split into halves, we can avoid
15704 writing to the top half twice. */
15705 if (TARGET_SSE_SPLIT_REGS)
15707 emit_clobber (op0);
15712 /* ??? Not sure about the best option for the Intel chips.
15713 The following would seem to satisfy; the register is
15714 entirely cleared, breaking the dependency chain. We
15715 then store to the upper half, with a dependency depth
15716 of one. A rumor has it that Intel recommends two movsd
15717 followed by an unpacklpd, but this is unconfirmed. And
15718 given that the dependency depth of the unpacklpd would
15719 still be one, I'm not sure why this would be better. */
15720 zero = CONST0_RTX (V2DFmode);
15723 m = adjust_address (op1, DFmode, 0);
15724 emit_insn (gen_sse2_loadlpd (op0, zero, m));
15725 m = adjust_address (op1, DFmode, 8);
15726 emit_insn (gen_sse2_loadhpd (op0, op0, m));
15730 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15732 op0 = gen_lowpart (V4SFmode, op0);
15733 op1 = gen_lowpart (V4SFmode, op1);
15734 emit_insn (gen_sse_movups (op0, op1));
15738 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
15739 emit_move_insn (op0, CONST0_RTX (mode));
15741 emit_clobber (op0);
15743 if (mode != V4SFmode)
15744 op0 = gen_lowpart (V4SFmode, op0);
15745 m = adjust_address (op1, V2SFmode, 0);
15746 emit_insn (gen_sse_loadlps (op0, op0, m));
15747 m = adjust_address (op1, V2SFmode, 8);
15748 emit_insn (gen_sse_loadhps (op0, op0, m));
15751 else if (MEM_P (op0))
15753 /* If we're optimizing for size, movups is the smallest. */
15754 if (optimize_insn_for_size_p ()
15755 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15757 op0 = gen_lowpart (V4SFmode, op0);
15758 op1 = gen_lowpart (V4SFmode, op1);
15759 emit_insn (gen_sse_movups (op0, op1));
15763 /* ??? Similar to above, only less clear because of quote
15764 typeless stores unquote. */
15765 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
15766 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15768 op0 = gen_lowpart (V16QImode, op0);
15769 op1 = gen_lowpart (V16QImode, op1);
15770 emit_insn (gen_sse2_movdqu (op0, op1));
15774 if (TARGET_SSE2 && mode == V2DFmode)
15776 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15778 op0 = gen_lowpart (V2DFmode, op0);
15779 op1 = gen_lowpart (V2DFmode, op1);
15780 emit_insn (gen_sse2_movupd (op0, op1));
15784 m = adjust_address (op0, DFmode, 0);
15785 emit_insn (gen_sse2_storelpd (m, op1));
15786 m = adjust_address (op0, DFmode, 8);
15787 emit_insn (gen_sse2_storehpd (m, op1));
15792 if (mode != V4SFmode)
15793 op1 = gen_lowpart (V4SFmode, op1);
15795 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15797 op0 = gen_lowpart (V4SFmode, op0);
15798 emit_insn (gen_sse_movups (op0, op1));
15802 m = adjust_address (op0, V2SFmode, 0);
15803 emit_insn (gen_sse_storelps (m, op1));
15804 m = adjust_address (op0, V2SFmode, 8);
15805 emit_insn (gen_sse_storehps (m, op1));
15810 gcc_unreachable ();
15813 /* Expand a push in MODE. This is some mode for which we do not support
15814 proper push instructions, at least from the registers that we expect
15815 the value to live in. */
15818 ix86_expand_push (enum machine_mode mode, rtx x)
15822 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
15823 GEN_INT (-GET_MODE_SIZE (mode)),
15824 stack_pointer_rtx, 1, OPTAB_DIRECT);
15825 if (tmp != stack_pointer_rtx)
15826 emit_move_insn (stack_pointer_rtx, tmp);
15828 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
15830 /* When we push an operand onto the stack, it has to be aligned at least
15831 at the function argument boundary. However since we don't have
15832 the argument type, we can't determine the actual argument
15833 boundary. */
15834 emit_move_insn (tmp, x);
15837 /* Helper function of ix86_fixup_binary_operands to canonicalize
15838 operand order. Returns true if the operands should be swapped. */
15841 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
15844 rtx dst = operands[0];
15845 rtx src1 = operands[1];
15846 rtx src2 = operands[2];
15848 /* If the operation is not commutative, we can't do anything. */
15849 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
15852 /* Highest priority is that src1 should match dst. */
15853 if (rtx_equal_p (dst, src1))
15855 if (rtx_equal_p (dst, src2))
15858 /* Next highest priority is that immediate constants come second. */
15859 if (immediate_operand (src2, mode))
15861 if (immediate_operand (src1, mode))
15864 /* Lowest priority is that memory references should come second. */
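/* For example, r1 = 5 + r1 is swapped to r1 = r1 + 5 so that src1
   matches the destination and the immediate comes second.  */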
15874 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
15875 destination to use for the operation. If different from the true
15876 destination in operands[0], a copy operation will be required. */
15879 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
15882 rtx dst = operands[0];
15883 rtx src1 = operands[1];
15884 rtx src2 = operands[2];
15886 /* Canonicalize operand order. */
15887 if (ix86_swap_binary_operands_p (code, mode, operands))
15891 /* It is invalid to swap operands of different modes. */
15892 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
15899 /* Both source operands cannot be in memory. */
15900 if (MEM_P (src1) && MEM_P (src2))
15902 /* Optimization: Only read from memory once. */
15903 if (rtx_equal_p (src1, src2))
15905 src2 = force_reg (mode, src2);
15909 src2 = force_reg (mode, src2);
15912 /* If the destination is memory, and we do not have matching source
15913 operands, do things in registers. */
15914 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
15915 dst = gen_reg_rtx (mode);
15917 /* Source 1 cannot be a constant. */
15918 if (CONSTANT_P (src1))
15919 src1 = force_reg (mode, src1);
15921 /* Source 1 cannot be a non-matching memory. */
15922 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
15923 src1 = force_reg (mode, src1);
15925 /* Improve address combine. */
15927 && GET_MODE_CLASS (mode) == MODE_INT
15929 src2 = force_reg (mode, src2);
15931 operands[1] = src1;
15932 operands[2] = src2;
15936 /* Similarly, but assume that the destination has already been
15937 set up properly. */
15940 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
15941 enum machine_mode mode, rtx operands[])
15943 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
15944 gcc_assert (dst == operands[0]);
15947 /* Attempt to expand a binary operator. Make the expansion closer to the
15948 actual machine than just general_operand, which would allow 3 separate
15949 memory references (one output, two input) in a single insn. */
15952 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
15955 rtx src1, src2, dst, op, clob;
15957 dst = ix86_fixup_binary_operands (code, mode, operands);
15958 src1 = operands[1];
15959 src2 = operands[2];
15961 /* Emit the instruction. */
15963 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
15964 if (reload_in_progress)
15966 /* Reload doesn't know about the flags register, and doesn't know that
15967 it doesn't want to clobber it. We can only do this with PLUS. */
15968 gcc_assert (code == PLUS);
15971 else if (reload_completed
15973 && !rtx_equal_p (dst, src1))
15975 /* This is going to be an LEA; avoid splitting it later. */
15980 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15981 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
15984 /* Fix up the destination if needed. */
15985 if (dst != operands[0])
15986 emit_move_insn (operands[0], dst);
15989 /* Return TRUE or FALSE depending on whether the binary operator meets the
15990 appropriate constraints. */
15993 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
15996 rtx dst = operands[0];
15997 rtx src1 = operands[1];
15998 rtx src2 = operands[2];
16000 /* Both source operands cannot be in memory. */
16001 if (MEM_P (src1) && MEM_P (src2))
16004 /* Canonicalize operand order for commutative operators. */
16005 if (ix86_swap_binary_operands_p (code, mode, operands))
16012 /* If the destination is memory, we must have a matching source operand. */
16013 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
16016 /* Source 1 cannot be a constant. */
16017 if (CONSTANT_P (src1))
16020 /* Source 1 cannot be a non-matching memory. */
16021 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
16022 /* Support "andhi/andsi/anddi" as a zero-extending move. */
16023 return (code == AND
16026 || (TARGET_64BIT && mode == DImode))
16027 && satisfies_constraint_L (src2));
16032 /* Attempt to expand a unary operator. Make the expansion closer to the
16033 actual machine than just general_operand, which would allow 2 separate
16034 memory references (one output, one input) in a single insn. */
16037 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
16040 int matching_memory;
16041 rtx src, dst, op, clob;
16046 /* If the destination is memory, and we do not have matching source
16047 operands, do things in registers. */
16048 matching_memory = 0;
16051 if (rtx_equal_p (dst, src))
16052 matching_memory = 1;
16054 dst = gen_reg_rtx (mode);
16057 /* When the source operand is memory, the destination must match. */
16058 if (MEM_P (src) && !matching_memory)
16059 src = force_reg (mode, src);
16061 /* Emit the instruction. */
16063 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
16064 if (reload_in_progress || code == NOT)
16066 /* Reload doesn't know about the flags register, and doesn't know that
16067 it doesn't want to clobber it. */
16068 gcc_assert (code == NOT);
16073 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16074 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
16077 /* Fix up the destination if needed. */
16078 if (dst != operands[0])
16079 emit_move_insn (operands[0], dst);
16082 /* Split a 32bit/64bit divmod with an 8bit unsigned divmod if the dividend
16083 and divisor are within the range [0, 255]. */
16086 ix86_split_idivmod (enum machine_mode mode, rtx operands[],
16089 rtx end_label, qimode_label;
16090 rtx insn, div, mod;
16091 rtx scratch, tmp0, tmp1, tmp2;
16092 rtx (*gen_divmod4_1) (rtx, rtx, rtx, rtx);
16093 rtx (*gen_zero_extend) (rtx, rtx);
16094 rtx (*gen_test_ccno_1) (rtx, rtx);
16099 gen_divmod4_1 = signed_p ? gen_divmodsi4_1 : gen_udivmodsi4_1;
16100 gen_test_ccno_1 = gen_testsi_ccno_1;
16101 gen_zero_extend = gen_zero_extendqisi2;
16104 gen_divmod4_1 = signed_p ? gen_divmoddi4_1 : gen_udivmoddi4_1;
16105 gen_test_ccno_1 = gen_testdi_ccno_1;
16106 gen_zero_extend = gen_zero_extendqidi2;
16109 gcc_unreachable ();
16112 end_label = gen_label_rtx ();
16113 qimode_label = gen_label_rtx ();
16115 scratch = gen_reg_rtx (mode);
16117 /* Use 8bit unsigned divmod if the dividend and divisor are within
16118 the range [0, 255]. */
16119 emit_move_insn (scratch, operands[2]);
16120 scratch = expand_simple_binop (mode, IOR, scratch, operands[3],
16121 scratch, 1, OPTAB_DIRECT);
16122 emit_insn (gen_test_ccno_1 (scratch, GEN_INT (-0x100)));
16123 tmp0 = gen_rtx_REG (CCNOmode, FLAGS_REG);
16124 tmp0 = gen_rtx_EQ (VOIDmode, tmp0, const0_rtx);
16125 tmp0 = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp0,
16126 gen_rtx_LABEL_REF (VOIDmode, qimode_label),
16128 insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp0));
16129 predict_jump (REG_BR_PROB_BASE * 50 / 100);
16130 JUMP_LABEL (insn) = qimode_label;
16132 /* Generate original signed/unsigned divmod. */
16133 div = gen_divmod4_1 (operands[0], operands[1],
16134 operands[2], operands[3]);
16137 /* Branch to the end. */
16138 emit_jump_insn (gen_jump (end_label));
16141 /* Generate 8bit unsigned divide. */
16142 emit_label (qimode_label);
16143 /* Don't use operands[0] for the result of the 8bit divide since not all
16144 registers support QImode ZERO_EXTRACT. */
16145 tmp0 = simplify_gen_subreg (HImode, scratch, mode, 0);
16146 tmp1 = simplify_gen_subreg (HImode, operands[2], mode, 0);
16147 tmp2 = simplify_gen_subreg (QImode, operands[3], mode, 0);
16148 emit_insn (gen_udivmodhiqi3 (tmp0, tmp1, tmp2));
16152 div = gen_rtx_DIV (SImode, operands[2], operands[3]);
16153 mod = gen_rtx_MOD (SImode, operands[2], operands[3]);
16157 div = gen_rtx_UDIV (SImode, operands[2], operands[3]);
16158 mod = gen_rtx_UMOD (SImode, operands[2], operands[3]);
16161 /* Extract remainder from AH. */
16162 tmp1 = gen_rtx_ZERO_EXTRACT (mode, tmp0, GEN_INT (8), GEN_INT (8));
16163 if (REG_P (operands[1]))
16164 insn = emit_move_insn (operands[1], tmp1);
16167 /* Need a new scratch register since the old one holds the result
16168 of the 8bit divide. */
16169 scratch = gen_reg_rtx (mode);
16170 emit_move_insn (scratch, tmp1);
16171 insn = emit_move_insn (operands[1], scratch);
16173 set_unique_reg_note (insn, REG_EQUAL, mod);
16175 /* Zero extend quotient from AL. */
16176 tmp1 = gen_lowpart (QImode, tmp0);
16177 insn = emit_insn (gen_zero_extend (operands[0], tmp1));
16178 set_unique_reg_note (insn, REG_EQUAL, div);
16180 emit_label (end_label);
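/* For illustration only: a scalar sketch (not GCC code) of the runtime
   shape of the split above, for the unsigned SImode case.  */
#if 0
static unsigned int
divmod_split (unsigned int a, unsigned int b, unsigned int *rem)
{
  if ((a | b) < 0x100)
    {
      /* Both values fit in 8 bits: the emitted code uses the 8-bit
         DIV, leaving the quotient in AL and the remainder in AH.  */
      *rem = a % b;
      return a / b;
    }
  /* Otherwise fall through to the full-width divide.  */
  *rem = a % b;
  return a / b;
}
#endif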
16183 #define LEA_MAX_STALL (3)
16184 #define LEA_SEARCH_THRESHOLD (LEA_MAX_STALL << 1)
16186 /* Increase given DISTANCE in half-cycles according to
16187 dependencies between PREV and NEXT instructions.
16188 Add 1 half-cycle if there is no dependency and
16189 go to the next cycle if there is some dependency. */
16191 static unsigned int
16192 increase_distance (rtx prev, rtx next, unsigned int distance)
16197 if (!prev || !next)
16198 return distance + (distance & 1) + 2;
16200 if (!DF_INSN_USES (next) || !DF_INSN_DEFS (prev))
16201 return distance + 1;
16203 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16204 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
16205 if (!DF_REF_IS_ARTIFICIAL (*def_rec)
16206 && DF_REF_REGNO (*use_rec) == DF_REF_REGNO (*def_rec))
16207 return distance + (distance & 1) + 2;
16209 return distance + 1;
16212 /* Function checks if instruction INSN defines register number
16213 REGNO1 or REGNO2. */
16216 insn_defines_reg (unsigned int regno1, unsigned int regno2,
16221 for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
16222 if (DF_REF_REG_DEF_P (*def_rec)
16223 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16224 && (regno1 == DF_REF_REGNO (*def_rec)
16225 || regno2 == DF_REF_REGNO (*def_rec)))
16233 /* Function checks if instruction INSN uses register number
16234 REGNO as part of an address expression. */
16237 insn_uses_reg_mem (unsigned int regno, rtx insn)
16241 for (use_rec = DF_INSN_USES (insn); *use_rec; use_rec++)
16242 if (DF_REF_REG_MEM_P (*use_rec) && regno == DF_REF_REGNO (*use_rec))
16248 /* Search backward for non-agu definition of register number REGNO1
16249 or register number REGNO2 in basic block starting from instruction
16250 START up to head of basic block or instruction INSN.
16252 The function stores true in *FOUND if a definition was found,
16253 and false otherwise.
16255 Distance in half-cycles between START and found instruction or head
16256 of BB is added to DISTANCE and returned. */
16259 distance_non_agu_define_in_bb (unsigned int regno1, unsigned int regno2,
16260 rtx insn, int distance,
16261 rtx start, bool *found)
16263 basic_block bb = start ? BLOCK_FOR_INSN (start) : NULL;
16266 enum attr_type insn_type;
16272 && distance < LEA_SEARCH_THRESHOLD)
16274 if (NONDEBUG_INSN_P (prev) && NONJUMP_INSN_P (prev))
16276 distance = increase_distance (prev, next, distance);
16277 if (insn_defines_reg (regno1, regno2, prev))
16279 insn_type = get_attr_type (prev);
16280 if (insn_type != TYPE_LEA)
16289 if (prev == BB_HEAD (bb))
16292 prev = PREV_INSN (prev);
16298 /* Search backward for non-agu definition of register number REGNO1
16299 or register number REGNO2 in INSN's basic block until
16300 1. Pass LEA_SEARCH_THRESHOLD instructions, or
16302 2. Reach the neighbouring BB's boundary, or
16302 3. Reach agu definition.
16303 Returns the distance between the non-agu definition point and INSN.
16304 If no definition point, returns -1. */
16307 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
16310 basic_block bb = BLOCK_FOR_INSN (insn);
16312 bool found = false;
16314 if (insn != BB_HEAD (bb))
16315 distance = distance_non_agu_define_in_bb (regno1, regno2, insn,
16316 distance, PREV_INSN (insn),
16319 if (!found && distance < LEA_SEARCH_THRESHOLD)
16323 bool simple_loop = false;
16325 FOR_EACH_EDGE (e, ei, bb->preds)
16328 simple_loop = true;
16333 distance = distance_non_agu_define_in_bb (regno1, regno2,
16335 BB_END (bb), &found);
16338 int shortest_dist = -1;
16339 bool found_in_bb = false;
16341 FOR_EACH_EDGE (e, ei, bb->preds)
16344 = distance_non_agu_define_in_bb (regno1, regno2,
16350 if (shortest_dist < 0)
16351 shortest_dist = bb_dist;
16352 else if (bb_dist > 0)
16353 shortest_dist = MIN (bb_dist, shortest_dist);
16359 distance = shortest_dist;
16363 /* get_attr_type may modify recog data. We want to make sure
16364 that recog data is valid for instruction INSN, on which
16365 distance_non_agu_define is called. INSN is unchanged here. */
16366 extract_insn_cached (insn);
16371 return distance >> 1;
16374 /* Return the distance in half-cycles between INSN and the next
16375 insn that uses register number REGNO in a memory address, added
16376 to DISTANCE. Return -1 if REGNO is set.
16378 Put true value into *FOUND if a register usage was found and
16379 false otherwise.
16380 Put true value into *REDEFINED if a register redefinition was
16381 found and false otherwise. */
16384 distance_agu_use_in_bb (unsigned int regno,
16385 rtx insn, int distance, rtx start,
16386 bool *found, bool *redefined)
16388 basic_block bb = start ? BLOCK_FOR_INSN (start) : NULL;
16393 *redefined = false;
16397 && distance < LEA_SEARCH_THRESHOLD)
16399 if (NONDEBUG_INSN_P (next) && NONJUMP_INSN_P (next))
16401 distance = increase_distance(prev, next, distance);
16402 if (insn_uses_reg_mem (regno, next))
16404 /* Return DISTANCE if OP0 is used in a memory
16405 address in NEXT. */
16410 if (insn_defines_reg (regno, INVALID_REGNUM, next))
16412 /* Return -1 if OP0 is set in NEXT. */
16420 if (next == BB_END (bb))
16423 next = NEXT_INSN (next);
16429 /* Return the distance between INSN and the next insn that uses
16430 register number REGNO0 in a memory address. Return -1 if no such
16431 use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set. */
16434 distance_agu_use (unsigned int regno0, rtx insn)
16436 basic_block bb = BLOCK_FOR_INSN (insn);
16438 bool found = false;
16439 bool redefined = false;
16441 if (insn != BB_END (bb))
16442 distance = distance_agu_use_in_bb (regno0, insn, distance,
16444 &found, &redefined);
16446 if (!found && !redefined && distance < LEA_SEARCH_THRESHOLD)
16450 bool simple_loop = false;
16452 FOR_EACH_EDGE (e, ei, bb->succs)
16455 simple_loop = true;
16460 distance = distance_agu_use_in_bb (regno0, insn,
16461 distance, BB_HEAD (bb),
16462 &found, &redefined);
16465 int shortest_dist = -1;
16466 bool found_in_bb = false;
16467 bool redefined_in_bb = false;
16469 FOR_EACH_EDGE (e, ei, bb->succs)
16472 = distance_agu_use_in_bb (regno0, insn,
16473 distance, BB_HEAD (e->dest),
16474 &found_in_bb, &redefined_in_bb);
16477 if (shortest_dist < 0)
16478 shortest_dist = bb_dist;
16479 else if (bb_dist > 0)
16480 shortest_dist = MIN (bb_dist, shortest_dist);
16486 distance = shortest_dist;
16490 if (!found || redefined)
16493 return distance >> 1;
16496 /* Define this macro to tune LEA priority vs ADD; it takes effect when
16497 there is a dilemma of choosing LEA or ADD.
16498 Negative value: ADD is preferred over LEA.
16500 Positive value: LEA is preferred over ADD. */
16501 #define IX86_LEA_PRIORITY 0
16503 /* Return true if usage of lea INSN has a performance advantage
16504 over a sequence of instructions. The instruction sequence has
16505 SPLIT_COST cycles higher latency than the lea latency. */
16508 ix86_lea_outperforms (rtx insn, unsigned int regno0, unsigned int regno1,
16509 unsigned int regno2, unsigned int split_cost)
16511 int dist_define, dist_use;
16513 dist_define = distance_non_agu_define (regno1, regno2, insn);
16514 dist_use = distance_agu_use (regno0, insn);
16516 if (dist_define < 0 || dist_define >= LEA_MAX_STALL)
16518 /* If there is no non-AGU operand definition, no AGU
16519 operand usage and the split cost is 0, then both the lea
16520 and non-lea variants have the same priority. Currently
16521 we prefer lea for 64 bit code and non-lea on 32 bit
16522 code. */
16523 if (dist_use < 0 && split_cost == 0)
16524 return TARGET_64BIT || IX86_LEA_PRIORITY;
16529 /* With a longer definition distance, lea is more preferable.
16530 Here we change it to take into account the splitting cost and
16531 lea priority. */
16532 dist_define += split_cost + IX86_LEA_PRIORITY;
16534 /* If there is no use in a memory address then we just check
16535 that the split cost does not exceed the AGU stall. */
16537 return dist_define >= LEA_MAX_STALL;
16539 /* If this insn has both a backward non-agu dependence and a forward
16540 agu dependence, the one with the shorter distance takes effect. */
16541 return dist_define >= dist_use;
16544 /* Return true if it is legal for INSN to clobber flags and
16545 false otherwise. */
16548 ix86_ok_to_clobber_flags (rtx insn)
16550 basic_block bb = BLOCK_FOR_INSN (insn);
16556 if (NONDEBUG_INSN_P (insn))
16558 for (use = DF_INSN_USES (insn); *use; use++)
16559 if (DF_REF_REG_USE_P (*use) && DF_REF_REGNO (*use) == FLAGS_REG)
16562 if (insn_defines_reg (FLAGS_REG, INVALID_REGNUM, insn))
16566 if (insn == BB_END (bb))
16569 insn = NEXT_INSN (insn);
16572 live = df_get_live_out(bb);
16573 return !REGNO_REG_SET_P (live, FLAGS_REG);
16576 /* Return true if we need to split op0 = op1 + op2 into a sequence of
16577 move and add to avoid AGU stalls. */
16580 ix86_avoid_lea_for_add (rtx insn, rtx operands[])
16582 unsigned int regno0 = true_regnum (operands[0]);
16583 unsigned int regno1 = true_regnum (operands[1]);
16584 unsigned int regno2 = true_regnum (operands[2]);
16586 /* Check if we need to optimize. */
16587 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
16590 /* Check it is correct to split here. */
16591 if (!ix86_ok_to_clobber_flags(insn))
16594 /* We need to split only adds with a non-destructive
16595 destination operand. */
16596 if (regno0 == regno1 || regno0 == regno2)
16599 return !ix86_lea_outperforms (insn, regno0, regno1, regno2, 1);
16602 /* Return true if we should emit a lea instruction instead of a mov
16603 instruction. */
16606 ix86_use_lea_for_mov (rtx insn, rtx operands[])
16608 unsigned int regno0;
16609 unsigned int regno1;
16611 /* Check if we need to optimize. */
16612 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
16615 /* Use lea for reg to reg moves only. */
16616 if (!REG_P (operands[0]) || !REG_P (operands[1]))
16619 regno0 = true_regnum (operands[0]);
16620 regno1 = true_regnum (operands[1]);
16622 return ix86_lea_outperforms (insn, regno0, regno1, -1, 0);
16625 /* Return true if we need to split lea into a sequence of
16626 instructions to avoid AGU stalls. */
16629 ix86_avoid_lea_for_addr (rtx insn, rtx operands[])
16631 unsigned int regno0 = true_regnum (operands[0]) ;
16632 unsigned int regno1 = -1;
16633 unsigned int regno2 = -1;
16634 unsigned int split_cost = 0;
16635 struct ix86_address parts;
16638 /* Check if we need to optimize. */
16639 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
16642 /* Check it is correct to split here. */
16643 if (!ix86_ok_to_clobber_flags(insn))
16646 ok = ix86_decompose_address (operands[1], &parts);
16649 /* We should not split into an add if a non-legitimate PIC
16650 operand is used as the displacement. */
16651 if (parts.disp && flag_pic && !LEGITIMATE_PIC_OPERAND_P (parts.disp))
16655 regno1 = true_regnum (parts.base);
16657 regno2 = true_regnum (parts.index);
16659 /* Compute how many cycles we will add to execution time
16660 if we split the lea into a sequence of instructions. */
16661 if (parts.base || parts.index)
16663 /* Have to use a mov instruction if the non-destructive
16664 destination form is used. */
16665 if (regno1 != regno0 && regno2 != regno0)
16668 /* Have to add index to base if both exist. */
16669 if (parts.base && parts.index)
16672 /* Have to use shift and adds if scale is 2 or greater. */
16673 if (parts.scale > 1)
16675 if (regno0 != regno1)
16677 else if (regno2 == regno0)
16680 split_cost += parts.scale;
16683 /* Have to use an add instruction with an immediate if
16684 disp is nonzero. */
16685 if (parts.disp && parts.disp != const0_rtx)
16688 /* Subtract the price of lea. */
16692 return !ix86_lea_outperforms (insn, regno0, regno1, regno2, split_cost);
16695 /* Emit x86 binary operator CODE in mode MODE, where the first operand
16696 matches the destination. The RTX includes a clobber of FLAGS_REG. */
16699 ix86_emit_binop (enum rtx_code code, enum machine_mode mode,
16704 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, dst, src));
16705 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16707 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
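/* For example, ix86_emit_binop (PLUS, SImode, dst, src) emits the
   two-address pattern (set dst (plus dst src)) plus the flags clobber,
   i.e. a plain "add" instruction.  */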
16710 /* Split lea instructions into a sequence of instructions
16711 which are executed on the ALU to avoid AGU stalls.
16712 It is assumed that it is allowed to clobber the flags register
16713 at the lea position. */
16716 ix86_split_lea_for_addr (rtx operands[], enum machine_mode mode)
16718 unsigned int regno0 = true_regnum (operands[0]) ;
16719 unsigned int regno1 = INVALID_REGNUM;
16720 unsigned int regno2 = INVALID_REGNUM;
16721 struct ix86_address parts;
16725 ok = ix86_decompose_address (operands[1], &parts);
16730 if (GET_MODE (parts.base) != mode)
16731 parts.base = gen_rtx_SUBREG (mode, parts.base, 0);
16732 regno1 = true_regnum (parts.base);
16737 if (GET_MODE (parts.index) != mode)
16738 parts.index = gen_rtx_SUBREG (mode, parts.index, 0);
16739 regno2 = true_regnum (parts.index);
16742 if (parts.scale > 1)
16744 /* Case r1 = r1 + ... */
16745 if (regno1 == regno0)
16747 /* If we have a case r1 = r1 + C * r1 then we
16748 should use multiplication, which is very
16749 expensive. Assume the cost model is wrong if we
16750 have such a case here. */
16751 gcc_assert (regno2 != regno0);
16753 for (adds = parts.scale; adds > 0; adds--)
16754 ix86_emit_binop (PLUS, mode, operands[0], parts.index);
16758 /* r1 = r2 + r3 * C case. Need to move r3 into r1. */
16759 if (regno0 != regno2)
16760 emit_insn (gen_rtx_SET (VOIDmode, operands[0], parts.index));
16762 /* Use shift for scaling. */
16763 ix86_emit_binop (ASHIFT, mode, operands[0],
16764 GEN_INT (exact_log2 (parts.scale)));
16767 ix86_emit_binop (PLUS, mode, operands[0], parts.base);
16769 if (parts.disp && parts.disp != const0_rtx)
16770 ix86_emit_binop (PLUS, mode, operands[0], parts.disp);
16773 else if (!parts.base && !parts.index)
16775 gcc_assert(parts.disp);
16776 emit_insn (gen_rtx_SET (VOIDmode, operands[0], parts.disp));
16782 if (regno0 != regno2)
16783 emit_insn (gen_rtx_SET (VOIDmode, operands[0], parts.index));
16785 else if (!parts.index)
16787 if (regno0 != regno1)
16788 emit_insn (gen_rtx_SET (VOIDmode, operands[0], parts.base));
16792 if (regno0 == regno1)
16794 else if (regno0 == regno2)
16798 emit_insn (gen_rtx_SET (VOIDmode, operands[0], parts.base));
16802 ix86_emit_binop (PLUS, mode, operands[0], tmp);
16805 if (parts.disp && parts.disp != const0_rtx)
16806 ix86_emit_binop (PLUS, mode, operands[0], parts.disp);
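/* For example, lea 0x8(%ebx,%ecx,4), %eax with %eax distinct from base
   and index splits into: mov %ecx, %eax; shl $2, %eax; add %ebx, %eax;
   add $0x8, %eax.  */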
16810 /* Return true if it is ok to optimize an ADD operation to a LEA
16811 operation to avoid flag register consumption. For most processors,
16812 ADD is faster than LEA. For processors like ATOM, if the
16813 destination register of LEA holds an actual address which will be
16814 used soon, LEA is better; otherwise ADD is better. */
16817 ix86_lea_for_add_ok (rtx insn, rtx operands[])
16819 unsigned int regno0 = true_regnum (operands[0]);
16820 unsigned int regno1 = true_regnum (operands[1]);
16821 unsigned int regno2 = true_regnum (operands[2]);
16823 /* If a = b + c, (a!=b && a!=c), must use lea form. */
16824 if (regno0 != regno1 && regno0 != regno2)
16827 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
16830 return ix86_lea_outperforms (insn, regno0, regno1, regno2, 0);
16833 /* Return true if the destination reg of SET_BODY is the shift count of
16834 USE_BODY. */
16837 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
16843 /* Retrieve destination of SET_BODY. */
16844 switch (GET_CODE (set_body))
16847 set_dest = SET_DEST (set_body);
16848 if (!set_dest || !REG_P (set_dest))
16852 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
16853 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
16861 /* Retrieve shift count of USE_BODY. */
16862 switch (GET_CODE (use_body))
16865 shift_rtx = XEXP (use_body, 1);
16868 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
16869 if (ix86_dep_by_shift_count_body (set_body,
16870 XVECEXP (use_body, 0, i)))
16878 && (GET_CODE (shift_rtx) == ASHIFT
16879 || GET_CODE (shift_rtx) == LSHIFTRT
16880 || GET_CODE (shift_rtx) == ASHIFTRT
16881 || GET_CODE (shift_rtx) == ROTATE
16882 || GET_CODE (shift_rtx) == ROTATERT))
16884 rtx shift_count = XEXP (shift_rtx, 1);
16886 /* Return true if shift count is dest of SET_BODY. */
16887 if (REG_P (shift_count)
16888 && true_regnum (set_dest) == true_regnum (shift_count))
16895 /* Return true if the destination reg of SET_INSN is the shift count of
16896 USE_INSN. */
16899 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
16901 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
16902 PATTERN (use_insn));
16905 /* Return TRUE or FALSE depending on whether the unary operator meets the
16906 appropriate constraints. */
16909 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
16910 enum machine_mode mode ATTRIBUTE_UNUSED,
16911 rtx operands[2] ATTRIBUTE_UNUSED)
16913 /* If one of the operands is memory, the source and destination must match. */
16914 if ((MEM_P (operands[0])
16915 || MEM_P (operands[1]))
16916 && ! rtx_equal_p (operands[0], operands[1]))
16921 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
16922 are ok, keeping in mind the possible movddup alternative. */
16925 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
16927 if (MEM_P (operands[0]))
16928 return rtx_equal_p (operands[0], operands[1 + high]);
16929 if (MEM_P (operands[1]) && MEM_P (operands[2]))
16930 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
16934 /* Post-reload splitter for converting an SF or DFmode value in an
16935 SSE register into an unsigned SImode value. */
16938 ix86_split_convert_uns_si_sse (rtx operands[])
16940 enum machine_mode vecmode;
16941 rtx value, large, zero_or_two31, input, two31, x;
16943 large = operands[1];
16944 zero_or_two31 = operands[2];
16945 input = operands[3];
16946 two31 = operands[4];
16947 vecmode = GET_MODE (large);
16948 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
16950 /* Load up the value into the low element. We must ensure that the other
16951 elements are valid floats -- zero is the easiest such value. */
16954 if (vecmode == V4SFmode)
16955 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
16957 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
16961 input = gen_rtx_REG (vecmode, REGNO (input));
16962 emit_move_insn (value, CONST0_RTX (vecmode));
16963 if (vecmode == V4SFmode)
16964 emit_insn (gen_sse_movss (value, value, input));
16966 emit_insn (gen_sse2_movsd (value, value, input));
16969 emit_move_insn (large, two31);
16970 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
16972 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
16973 emit_insn (gen_rtx_SET (VOIDmode, large, x));
16975 x = gen_rtx_AND (vecmode, zero_or_two31, large);
16976 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
16978 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
16979 emit_insn (gen_rtx_SET (VOIDmode, value, x));
16981 large = gen_rtx_REG (V4SImode, REGNO (large));
16982 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
16984 x = gen_rtx_REG (V4SImode, REGNO (value));
16985 if (vecmode == V4SFmode)
16986 emit_insn (gen_fix_truncv4sfv4si2 (x, value));
16988 emit_insn (gen_sse2_cvttpd2dq (x, value));
16991 emit_insn (gen_xorv4si3 (value, value, large));
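/* For illustration only: the scalar idea behind this splitter (a
   sketch, not GCC code).  The truncating convert handles only the
   signed range, so values >= 0x1p31 are biased down before converting
   and the sign bit is xored back in afterwards.  */
#if 0
static unsigned int
sse_float_to_uint (float x)
{
  if (x >= 2147483648.0f)  /* 0x1.0p31f */
    return (unsigned int) (int) (x - 2147483648.0f) ^ 0x80000000u;
  return (unsigned int) (int) x;
}
#endif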
16994 /* Convert an unsigned DImode value into a DFmode, using only SSE.
16995 Expects the 64-bit DImode to be supplied in a pair of integral
16996 registers. Requires SSE2; will use SSE3 if available. For x86_32,
16997 -mfpmath=sse, !optimize_size only. */
17000 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
17002 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
17003 rtx int_xmm, fp_xmm;
17004 rtx biases, exponents;
17007 int_xmm = gen_reg_rtx (V4SImode);
17008 if (TARGET_INTER_UNIT_MOVES)
17009 emit_insn (gen_movdi_to_sse (int_xmm, input));
17010 else if (TARGET_SSE_SPLIT_REGS)
17012 emit_clobber (int_xmm);
17013 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
17017 x = gen_reg_rtx (V2DImode);
17018 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
17019 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
17022 x = gen_rtx_CONST_VECTOR (V4SImode,
17023 gen_rtvec (4, GEN_INT (0x43300000UL),
17024 GEN_INT (0x45300000UL),
17025 const0_rtx, const0_rtx));
17026 exponents = validize_mem (force_const_mem (V4SImode, x));
17028 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
17029 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
17031 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_lo_xmm)
17032 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
17033 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
17034 (0x1.0p84 + double(fp_value_hi_xmm)).
17035 Note these exponents differ by 32. */
17037 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
17039 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
17040 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
17041 real_ldexp (&bias_lo_rvt, &dconst1, 52);
17042 real_ldexp (&bias_hi_rvt, &dconst1, 84);
17043 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
17044 x = const_double_from_real_value (bias_hi_rvt, DFmode);
17045 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
17046 biases = validize_mem (force_const_mem (V2DFmode, biases));
17047 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
17049 /* Add the upper and lower DFmode values together. */
17051 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
17054 x = copy_to_mode_reg (V2DFmode, fp_xmm);
17055 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
17056 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
17059 ix86_expand_vector_extract (false, target, fp_xmm, 0);
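/* For illustration only: the same bias trick in scalar form (a sketch,
   not GCC code; C99 hex float constants assumed).  */
#if 0
#include <stdint.h>
#include <string.h>

static double
uint64_to_double (uint64_t x)
{
  uint64_t lo_bits = ((uint64_t) 0x43300000 << 32) | (uint32_t) x;
  uint64_t hi_bits = ((uint64_t) 0x45300000 << 32) | (x >> 32);
  double lo, hi;
  memcpy (&lo, &lo_bits, sizeof lo);  /* 0x1.0p52 + low word */
  memcpy (&hi, &hi_bits, sizeof hi);  /* 0x1.0p84 + high word * 0x1.0p32 */
  return (hi - 0x1.0p84) + (lo - 0x1.0p52);
}
#endif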
17062 /* Not used, but eases macroization of patterns. */
17064 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
17065 rtx input ATTRIBUTE_UNUSED)
17067 gcc_unreachable ();
17070 /* Convert an unsigned SImode value into a DFmode. Only currently used
17071 for SSE, but applicable anywhere. */
17074 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
17076 REAL_VALUE_TYPE TWO31r;
17079 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
17080 NULL, 1, OPTAB_DIRECT);
17082 fp = gen_reg_rtx (DFmode);
17083 emit_insn (gen_floatsidf2 (fp, x));
17085 real_ldexp (&TWO31r, &dconst1, 31);
17086 x = const_double_from_real_value (TWO31r, DFmode);
17088 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
17090 emit_move_insn (target, x);
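/* For illustration only: the bias used above in scalar form (a sketch,
   not GCC code).  Adding INT_MIN wraps the value into the signed
   range; adding 0x1.0p31 back after the convert is exact, since DFmode
   can represent any 32-bit integer.  */
#if 0
static double
uint32_to_double (unsigned int u)
{
  int biased = (int) (u + 0x80000000u);  /* u - 2^31, wrapped */
  return (double) biased + 2147483648.0;
}
#endif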
17093 /* Convert a signed DImode value into a DFmode. Only used for SSE in
17094 32-bit mode; otherwise we have a direct convert instruction. */
17097 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
17099 REAL_VALUE_TYPE TWO32r;
17100 rtx fp_lo, fp_hi, x;
17102 fp_lo = gen_reg_rtx (DFmode);
17103 fp_hi = gen_reg_rtx (DFmode);
17105 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
17107 real_ldexp (&TWO32r, &dconst1, 32);
17108 x = const_double_from_real_value (TWO32r, DFmode);
17109 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
17111 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
17113 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
17116 emit_move_insn (target, x);
17119 /* Convert an unsigned SImode value into a SFmode, using only SSE.
17120 For x86_32, -mfpmath=sse, !optimize_size only. */
17122 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
17124 REAL_VALUE_TYPE ONE16r;
17125 rtx fp_hi, fp_lo, int_hi, int_lo, x;
17127 real_ldexp (&ONE16r, &dconst1, 16);
17128 x = const_double_from_real_value (ONE16r, SFmode);
17129 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
17130 NULL, 0, OPTAB_DIRECT);
17131 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
17132 NULL, 0, OPTAB_DIRECT);
17133 fp_hi = gen_reg_rtx (SFmode);
17134 fp_lo = gen_reg_rtx (SFmode);
17135 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
17136 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
17137 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
17139 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
17141 if (!rtx_equal_p (target, fp_hi))
17142 emit_move_insn (target, fp_hi);
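/* For illustration only: the 16/16 split above in scalar form (a
   sketch, not GCC code).  Each half converts to SFmode exactly and
   hi * 0x1.0p16 is exact, so only the final add rounds.  */
#if 0
static float
uint32_to_float (unsigned int u)
{
  float hi = (float) (u >> 16);
  float lo = (float) (u & 0xffff);
  return hi * 65536.0f + lo;
}
#endif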
17145 /* floatunsv{4,8}siv{4,8}sf2 expander. Expand code to convert
17146 a vector of unsigned ints VAL to vector of floats TARGET. */
17149 ix86_expand_vector_convert_uns_vsivsf (rtx target, rtx val)
17152 REAL_VALUE_TYPE TWO16r;
17153 enum machine_mode intmode = GET_MODE (val);
17154 enum machine_mode fltmode = GET_MODE (target);
17155 rtx (*cvt) (rtx, rtx);
17157 if (intmode == V4SImode)
17158 cvt = gen_floatv4siv4sf2;
17160 cvt = gen_floatv8siv8sf2;
17161 tmp[0] = ix86_build_const_vector (intmode, 1, GEN_INT (0xffff));
17162 tmp[0] = force_reg (intmode, tmp[0]);
17163 tmp[1] = expand_simple_binop (intmode, AND, val, tmp[0], NULL_RTX, 1,
17165 tmp[2] = expand_simple_binop (intmode, LSHIFTRT, val, GEN_INT (16),
17166 NULL_RTX, 1, OPTAB_DIRECT);
17167 tmp[3] = gen_reg_rtx (fltmode);
17168 emit_insn (cvt (tmp[3], tmp[1]));
17169 tmp[4] = gen_reg_rtx (fltmode);
17170 emit_insn (cvt (tmp[4], tmp[2]));
17171 real_ldexp (&TWO16r, &dconst1, 16);
17172 tmp[5] = const_double_from_real_value (TWO16r, SFmode);
17173 tmp[5] = force_reg (fltmode, ix86_build_const_vector (fltmode, 1, tmp[5]));
17174 tmp[6] = expand_simple_binop (fltmode, MULT, tmp[4], tmp[5], NULL_RTX, 1,
17176 tmp[7] = expand_simple_binop (fltmode, PLUS, tmp[3], tmp[6], target, 1,
17178 if (tmp[7] != target)
17179 emit_move_insn (target, tmp[7]);
17182 /* Adjust a V*SFmode/V*DFmode value VAL so that *sfix_trunc* resp. fix_trunc*
17183 pattern can be used on it instead of *ufix_trunc* resp. fixuns_trunc*.
17184 This is done by doing just signed conversion if < 0x1p31, and otherwise by
17185 subtracting 0x1p31 first and xoring in 0x80000000 from *XORP afterwards. */
17188 ix86_expand_adjust_ufix_to_sfix_si (rtx val, rtx *xorp)
17190 REAL_VALUE_TYPE TWO31r;
17191 rtx two31r, tmp[4];
17192 enum machine_mode mode = GET_MODE (val);
17193 enum machine_mode scalarmode = GET_MODE_INNER (mode);
17194 enum machine_mode intmode = GET_MODE_SIZE (mode) == 32 ? V8SImode : V4SImode;
17195 rtx (*cmp) (rtx, rtx, rtx, rtx);
17198 for (i = 0; i < 3; i++)
17199 tmp[i] = gen_reg_rtx (mode);
17200 real_ldexp (&TWO31r, &dconst1, 31);
17201 two31r = const_double_from_real_value (TWO31r, scalarmode);
17202 two31r = ix86_build_const_vector (mode, 1, two31r);
17203 two31r = force_reg (mode, two31r);
17206 case V8SFmode: cmp = gen_avx_maskcmpv8sf3; break;
17207 case V4SFmode: cmp = gen_sse_maskcmpv4sf3; break;
17208 case V4DFmode: cmp = gen_avx_maskcmpv4df3; break;
17209 case V2DFmode: cmp = gen_sse2_maskcmpv2df3; break;
17210 default: gcc_unreachable ();
17212 tmp[3] = gen_rtx_LE (mode, two31r, val);
17213 emit_insn (cmp (tmp[0], two31r, val, tmp[3]));
17214 tmp[1] = expand_simple_binop (mode, AND, tmp[0], two31r, tmp[1],
17216 if (intmode == V4SImode || TARGET_AVX2)
17217 *xorp = expand_simple_binop (intmode, ASHIFT,
17218 gen_lowpart (intmode, tmp[0]),
17219 GEN_INT (31), NULL_RTX, 0,
17223 rtx two31 = GEN_INT ((unsigned HOST_WIDE_INT) 1 << 31);
17224 two31 = ix86_build_const_vector (intmode, 1, two31);
17225 *xorp = expand_simple_binop (intmode, AND,
17226 gen_lowpart (intmode, tmp[0]),
17227 two31, NULL_RTX, 0,
17230 return expand_simple_binop (mode, MINUS, val, tmp[1], tmp[2],
17231 0, OPTAB_DIRECT);
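/* For illustration only: the adjustment above for a single lane (a
   sketch, not GCC code).  *XORP restores bit 31 of lanes that were
   biased down by 0x1p31.  */
#if 0
static unsigned int
ufix_adjust_lane (float v)
{
  unsigned int xor_mask = v >= 0x1.0p31f ? 0x80000000u : 0u;
  float adjusted = v >= 0x1.0p31f ? v - 0x1.0p31f : v;
  return (unsigned int) (int) adjusted ^ xor_mask;
}
#endif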
17234 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
17235 then replicate the value for all elements of the vector
17236 register. */
17239 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
17243 enum machine_mode scalar_mode;
17260 n_elt = GET_MODE_NUNITS (mode);
17261 v = rtvec_alloc (n_elt);
17262 scalar_mode = GET_MODE_INNER (mode);
17264 RTVEC_ELT (v, 0) = value;
17266 for (i = 1; i < n_elt; ++i)
17267 RTVEC_ELT (v, i) = vect ? value : CONST0_RTX (scalar_mode);
17269 return gen_rtx_CONST_VECTOR (mode, v);
17272 gcc_unreachable ();
17276 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
17277 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
17278 for an SSE register. If VECT is true, then replicate the mask for
17279 all elements of the vector register. If INVERT is true, then create
17280 a mask excluding the sign bit. */
17283 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
17285 enum machine_mode vec_mode, imode;
17286 HOST_WIDE_INT hi, lo;
17291 /* Find the sign bit, sign extended to 2*HWI. */
17299 mode = GET_MODE_INNER (mode);
17301 lo = 0x80000000, hi = lo < 0;
17309 mode = GET_MODE_INNER (mode);
17311 if (HOST_BITS_PER_WIDE_INT >= 64)
17312 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
17314 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
17319 vec_mode = VOIDmode;
17320 if (HOST_BITS_PER_WIDE_INT >= 64)
17323 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
17330 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
17334 lo = ~lo, hi = ~hi;
17340 mask = immed_double_const (lo, hi, imode);
17342 vec = gen_rtvec (2, v, mask);
17343 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
17344 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
17351 gcc_unreachable ();
17355 lo = ~lo, hi = ~hi;
17357 /* Force this value into the low part of a fp vector constant. */
17358 mask = immed_double_const (lo, hi, imode);
17359 mask = gen_lowpart (mode, mask);
17361 if (vec_mode == VOIDmode)
17362 return force_reg (mode, mask);
17364 v = ix86_build_const_vector (vec_mode, vect, mask);
17365 return force_reg (vec_mode, v);
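/* For reference (informal): for DFmode the masks built here are
     sign bit mask:       0x8000000000000000
     inverted (INVERT):   0x7fffffffffffffff
   so NEG can be done as an XOR with the first and ABS as an AND with
   the second, which is exactly how the absneg expander below uses
   them.  */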
17368 /* Generate code for floating point ABS or NEG. */
17371 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
17374 rtx mask, set, dst, src;
17375 bool use_sse = false;
17376 bool vector_mode = VECTOR_MODE_P (mode);
17377 enum machine_mode vmode = mode;
17381 else if (mode == TFmode)
17383 else if (TARGET_SSE_MATH)
17385 use_sse = SSE_FLOAT_MODE_P (mode);
17386 if (mode == SFmode)
17388 else if (mode == DFmode)
17392 /* NEG and ABS performed with SSE use bitwise mask operations.
17393 Create the appropriate mask now. */
17395 mask = ix86_build_signbit_mask (vmode, vector_mode, code == ABS);
17402 set = gen_rtx_fmt_e (code, mode, src);
17403 set = gen_rtx_SET (VOIDmode, dst, set);
17410 use = gen_rtx_USE (VOIDmode, mask);
17412 par = gen_rtvec (2, set, use);
17415 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
17416 par = gen_rtvec (3, set, use, clob);
17418 emit_insn (gen_rtx_PARALLEL (VOIDmode, par));
17424 /* Expand a copysign operation. Special case operand 0 being a constant. */
17427 ix86_expand_copysign (rtx operands[])
17429 enum machine_mode mode, vmode;
17430 rtx dest, op0, op1, mask, nmask;
17432 dest = operands[0];
17436 mode = GET_MODE (dest);
17438 if (mode == SFmode)
17440 else if (mode == DFmode)
17445 if (GET_CODE (op0) == CONST_DOUBLE)
17447 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
17449 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
17450 op0 = simplify_unary_operation (ABS, mode, op0, mode);
17452 if (mode == SFmode || mode == DFmode)
17454 if (op0 == CONST0_RTX (mode))
17455 op0 = CONST0_RTX (vmode);
17458 rtx v = ix86_build_const_vector (vmode, false, op0);
17460 op0 = force_reg (vmode, v);
17463 else if (op0 != CONST0_RTX (mode))
17464 op0 = force_reg (mode, op0);
17466 mask = ix86_build_signbit_mask (vmode, 0, 0);
17468 if (mode == SFmode)
17469 copysign_insn = gen_copysignsf3_const;
17470 else if (mode == DFmode)
17471 copysign_insn = gen_copysigndf3_const;
17473 copysign_insn = gen_copysigntf3_const;
17475 emit_insn (copysign_insn (dest, op0, op1, mask));
17479 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
17481 nmask = ix86_build_signbit_mask (vmode, 0, 1);
17482 mask = ix86_build_signbit_mask (vmode, 0, 0);
17484 if (mode == SFmode)
17485 copysign_insn = gen_copysignsf3_var;
17486 else if (mode == DFmode)
17487 copysign_insn = gen_copysigndf3_var;
17489 copysign_insn = gen_copysigntf3_var;
17491 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
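/* A scalar sketch of the mask-based copysign expanded above
   (illustrative only; the uint64_t bit-cast stands in for the vector
   masking).  The result takes the magnitude of X and the sign of Y:

     double copysign_bits (double x, double y)
     {
       uint64_t xb, yb, s = 0x8000000000000000ull;
       memcpy (&xb, &x, 8);
       memcpy (&yb, &y, 8);
       xb = (xb & ~s) | (yb & s);
       memcpy (&x, &xb, 8);
       return x;
     }  */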
17495 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
17496 be a constant, and so has already been expanded into a vector constant. */
17499 ix86_split_copysign_const (rtx operands[])
17501 enum machine_mode mode, vmode;
17502 rtx dest, op0, mask, x;
17504 dest = operands[0];
17506 mask = operands[3];
17508 mode = GET_MODE (dest);
17509 vmode = GET_MODE (mask);
17511 dest = simplify_gen_subreg (vmode, dest, mode, 0);
17512 x = gen_rtx_AND (vmode, dest, mask);
17513 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17515 if (op0 != CONST0_RTX (vmode))
17517 x = gen_rtx_IOR (vmode, dest, op0);
17518 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17522 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
17523 so we have to do two masks. */
17526 ix86_split_copysign_var (rtx operands[])
17528 enum machine_mode mode, vmode;
17529 rtx dest, scratch, op0, op1, mask, nmask, x;
17531 dest = operands[0];
17532 scratch = operands[1];
17535 nmask = operands[4];
17536 mask = operands[5];
17538 mode = GET_MODE (dest);
17539 vmode = GET_MODE (mask);
17541 if (rtx_equal_p (op0, op1))
17543 /* Shouldn't happen often (it's useless, obviously), but when it does
17544 we'd generate incorrect code if we continue below. */
17545 emit_move_insn (dest, op0);
17549 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
17551 gcc_assert (REGNO (op1) == REGNO (scratch));
17553 x = gen_rtx_AND (vmode, scratch, mask);
17554 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
17557 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
17558 x = gen_rtx_NOT (vmode, dest);
17559 x = gen_rtx_AND (vmode, x, op0);
17560 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17564 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
17566 x = gen_rtx_AND (vmode, scratch, mask);
17568 else /* alternative 2,4 */
17570 gcc_assert (REGNO (mask) == REGNO (scratch));
17571 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
17572 x = gen_rtx_AND (vmode, scratch, op1);
17574 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
17576 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
17578 dest = simplify_gen_subreg (vmode, op0, mode, 0);
17579 x = gen_rtx_AND (vmode, dest, nmask);
17581 else /* alternative 3,4 */
17583 gcc_assert (REGNO (nmask) == REGNO (dest));
17585 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
17586 x = gen_rtx_AND (vmode, dest, op0);
17588 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17591 x = gen_rtx_IOR (vmode, dest, scratch);
17592 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
17595 /* Return TRUE or FALSE depending on whether the first SET in INSN
17596 has source and destination with matching CC modes and whether the
17597 CC mode is at least as constrained as REQ_MODE. */
17600 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
17603 enum machine_mode set_mode;
17605 set = PATTERN (insn);
17606 if (GET_CODE (set) == PARALLEL)
17607 set = XVECEXP (set, 0, 0);
17608 gcc_assert (GET_CODE (set) == SET);
17609 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
17611 set_mode = GET_MODE (SET_DEST (set));
17615 if (req_mode != CCNOmode
17616 && (req_mode != CCmode
17617 || XEXP (SET_SRC (set), 1) != const0_rtx))
17621 if (req_mode == CCGCmode)
17625 if (req_mode == CCGOCmode || req_mode == CCNOmode)
17629 if (req_mode == CCZmode)
17639 if (set_mode != req_mode)
17644 gcc_unreachable ();
17647 return GET_MODE (SET_SRC (set)) == set_mode;
17650 /* Generate insn patterns to do an integer compare of OPERANDS. */
17653 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
17655 enum machine_mode cmpmode;
17658 cmpmode = SELECT_CC_MODE (code, op0, op1);
17659 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
17661 /* This is very simple, but making the interface the same as in the
17662 FP case makes the rest of the code easier. */
17663 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
17664 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
17666 /* Return the test that should be put into the flags user, i.e.
17667 the bcc, scc, or cmov instruction. */
17668 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
17671 /* Figure out whether to use ordered or unordered fp comparisons.
17672 Return the appropriate mode to use. */
17675 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
17677 /* ??? In order to make all comparisons reversible, we do all comparisons
17678 non-trapping when compiling for IEEE. Once gcc is able to distinguish
17679 all forms of trapping and nontrapping comparisons, we can make inequality
17680 comparisons trapping again, since it results in better code when using
17681 FCOM based compares. */
17682 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
17686 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
17688 enum machine_mode mode = GET_MODE (op0);
17690 if (SCALAR_FLOAT_MODE_P (mode))
17692 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
17693 return ix86_fp_compare_mode (code);
17698 /* Only zero flag is needed. */
17699 case EQ: /* ZF=0 */
17700 case NE: /* ZF!=0 */
17702 /* Codes needing carry flag. */
17703 case GEU: /* CF=0 */
17704 case LTU: /* CF=1 */
17705 /* Detect overflow checks. They need just the carry flag. */
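/* E.g. (a sketch): for "if (a + b < a)" with unsigned A and B, op0 is
   (plus a b) and op1 is a; the test is true exactly when the addition
   wrapped, so the carry flag alone decides it.  */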
17706 if (GET_CODE (op0) == PLUS
17707 && rtx_equal_p (op1, XEXP (op0, 0)))
17711 case GTU: /* CF=0 & ZF=0 */
17712 case LEU: /* CF=1 | ZF=1 */
17713 /* Detect overflow checks. They need just the carry flag. */
17714 if (GET_CODE (op0) == MINUS
17715 && rtx_equal_p (op1, XEXP (op0, 0)))
17719 /* Codes possibly doable only with sign flag when
17720 comparing against zero. */
17721 case GE: /* SF=OF or SF=0 */
17722 case LT: /* SF<>OF or SF=1 */
17723 if (op1 == const0_rtx)
17726 /* For other cases the carry flag is not required. */
17728 /* Codes doable only with the sign flag when comparing
17729 against zero, but we miss the jump instruction for it,
17730 so we need to use relational tests against overflow,
17731 which thus needs to be zero. */
17732 case GT: /* ZF=0 & SF=OF */
17733 case LE: /* ZF=1 | SF<>OF */
17734 if (op1 == const0_rtx)
17738 /* The strcmp pattern does (use flags), and combine may ask us for a proper mode. */
17743 gcc_unreachable ();
17747 /* Return the fixed registers used for condition codes. */
17750 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
17757 /* If two condition code modes are compatible, return a condition code
17758 mode which is compatible with both. Otherwise, return VOIDmode.
17761 static enum machine_mode
17762 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
17767 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
17770 if ((m1 == CCGCmode && m2 == CCGOCmode)
17771 || (m1 == CCGOCmode && m2 == CCGCmode))
17777 gcc_unreachable ();
17807 /* These are only compatible with themselves, which we already know. */
17814 /* Return a comparison we can do that is equivalent to
17815 swap_condition (code), except possibly for orderedness.
17816 Never change orderedness if TARGET_IEEE_FP, returning
17817 UNKNOWN in that case if necessary. */
17819 static enum rtx_code
17820 ix86_fp_swap_condition (enum rtx_code code)
17824 case GT: /* GTU - CF=0 & ZF=0 */
17825 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
17826 case GE: /* GEU - CF=0 */
17827 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
17828 case UNLT: /* LTU - CF=1 */
17829 return TARGET_IEEE_FP ? UNKNOWN : GT;
17830 case UNLE: /* LEU - CF=1 | ZF=1 */
17831 return TARGET_IEEE_FP ? UNKNOWN : GE;
17833 return swap_condition (code);
17837 /* Return the cost of comparison CODE using the best strategy for performance.
17838 All of the following functions use the number of instructions as the cost metric.
17839 In the future this should be tweaked to compute bytes for optimize_size and
17840 to take into account the performance of various instructions on various CPUs.
17843 ix86_fp_comparison_cost (enum rtx_code code)
17847 /* The cost of code using bit-twiddling on %ah. */
17864 arith_cost = TARGET_IEEE_FP ? 5 : 4;
17868 arith_cost = TARGET_IEEE_FP ? 6 : 4;
17871 gcc_unreachable ();
17874 switch (ix86_fp_comparison_strategy (code))
17876 case IX86_FPCMP_COMI:
17877 return arith_cost > 4 ? 3 : 2;
17878 case IX86_FPCMP_SAHF:
17879 return arith_cost > 4 ? 4 : 3;
17885 /* Return the strategy to use for floating-point comparisons. We assume that fcomi is always
17886 preferable where available, since that is also true when looking at size
17887 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
17889 enum ix86_fpcmp_strategy
17890 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
17892 /* Do fcomi/sahf based test when profitable. */
17895 return IX86_FPCMP_COMI;
17897 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
17898 return IX86_FPCMP_SAHF;
17900 return IX86_FPCMP_ARITH;
17903 /* Swap, force into registers, or otherwise massage the two operands
17904 to a fp comparison. The operands are updated in place; the new
17905 comparison code is returned. */
17907 static enum rtx_code
17908 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
17910 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
17911 rtx op0 = *pop0, op1 = *pop1;
17912 enum machine_mode op_mode = GET_MODE (op0);
17913 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
17915 /* All of the unordered compare instructions only work on registers.
17916 The same is true of the fcomi compare instructions. The XFmode
17917 compare instructions require registers except when comparing
17918 against zero or when converting operand 1 from fixed point to floating point. */
17922 && (fpcmp_mode == CCFPUmode
17923 || (op_mode == XFmode
17924 && ! (standard_80387_constant_p (op0) == 1
17925 || standard_80387_constant_p (op1) == 1)
17926 && GET_CODE (op1) != FLOAT)
17927 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
17929 op0 = force_reg (op_mode, op0);
17930 op1 = force_reg (op_mode, op1);
17934 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
17935 things around if they appear profitable, otherwise force op0
17936 into a register. */
17938 if (standard_80387_constant_p (op0) == 0
17940 && ! (standard_80387_constant_p (op1) == 0
17943 enum rtx_code new_code = ix86_fp_swap_condition (code);
17944 if (new_code != UNKNOWN)
17947 tmp = op0, op0 = op1, op1 = tmp;
17953 op0 = force_reg (op_mode, op0);
17955 if (CONSTANT_P (op1))
17957 int tmp = standard_80387_constant_p (op1);
17959 op1 = validize_mem (force_const_mem (op_mode, op1));
17963 op1 = force_reg (op_mode, op1);
17966 op1 = force_reg (op_mode, op1);
17970 /* Try to rearrange the comparison to make it cheaper. */
17971 if (ix86_fp_comparison_cost (code)
17972 > ix86_fp_comparison_cost (swap_condition (code))
17973 && (REG_P (op1) || can_create_pseudo_p ()))
17976 tmp = op0, op0 = op1, op1 = tmp;
17977 code = swap_condition (code);
17979 op0 = force_reg (op_mode, op0);
17987 /* Convert comparison codes we use to represent FP comparison to integer
17988 code that will result in a proper branch. Return UNKNOWN if no such code is available.
17992 ix86_fp_compare_code_to_integer (enum rtx_code code)
18021 /* Generate insn patterns to do a floating point compare of OPERANDS. */
18024 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
18026 enum machine_mode fpcmp_mode, intcmp_mode;
18029 fpcmp_mode = ix86_fp_compare_mode (code);
18030 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
18032 /* Do fcomi/sahf based test when profitable. */
18033 switch (ix86_fp_comparison_strategy (code))
18035 case IX86_FPCMP_COMI:
18036 intcmp_mode = fpcmp_mode;
18037 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
18038 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
18043 case IX86_FPCMP_SAHF:
18044 intcmp_mode = fpcmp_mode;
18045 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
18046 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
18050 scratch = gen_reg_rtx (HImode);
18051 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
18052 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
18055 case IX86_FPCMP_ARITH:
18056 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
18057 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
18058 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
18060 scratch = gen_reg_rtx (HImode);
18061 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
18063 /* In the unordered case, we have to check C2 for NaN's, which
18064 doesn't happen to work out to anything nice combination-wise.
18065 So do some bit twiddling on the value we've got in AH to come
18066 up with an appropriate set of condition codes. */
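/* Background for the magic constants below (informal): fnstsw stores
   the x87 status word, and once AH is moved into the flags the
   condition bits sit at C0 = 0x01, C2 = 0x04, C3 = 0x40, so 0x45
   masks C0|C2|C3.  A compare sets C0 for "below", C3 for "equal",
   and all of C0, C2 and C3 for unordered, which is why the
   TARGET_IEEE_FP paths need extra twiddling to separate NaNs.  */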
18068 intcmp_mode = CCNOmode;
18073 if (code == GT || !TARGET_IEEE_FP)
18075 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
18080 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
18081 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
18082 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
18083 intcmp_mode = CCmode;
18089 if (code == LT && TARGET_IEEE_FP)
18091 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
18092 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
18093 intcmp_mode = CCmode;
18098 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
18104 if (code == GE || !TARGET_IEEE_FP)
18106 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
18111 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
18112 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
18118 if (code == LE && TARGET_IEEE_FP)
18120 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
18121 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
18122 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
18123 intcmp_mode = CCmode;
18128 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
18134 if (code == EQ && TARGET_IEEE_FP)
18136 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
18137 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
18138 intcmp_mode = CCmode;
18143 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
18149 if (code == NE && TARGET_IEEE_FP)
18151 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
18152 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
18158 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
18164 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
18168 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
18173 gcc_unreachable ();
18181 /* Return the test that should be put into the flags user, i.e.
18182 the bcc, scc, or cmov instruction. */
18183 return gen_rtx_fmt_ee (code, VOIDmode,
18184 gen_rtx_REG (intcmp_mode, FLAGS_REG),
18189 ix86_expand_compare (enum rtx_code code, rtx op0, rtx op1)
18193 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
18194 ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
18196 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
18198 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
18199 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
18202 ret = ix86_expand_int_compare (code, op0, op1);
18208 ix86_expand_branch (enum rtx_code code, rtx op0, rtx op1, rtx label)
18210 enum machine_mode mode = GET_MODE (op0);
18222 tmp = ix86_expand_compare (code, op0, op1);
18223 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
18224 gen_rtx_LABEL_REF (VOIDmode, label),
18226 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
18233 /* Expand DImode branch into multiple compare+branch. */
18235 rtx lo[2], hi[2], label2;
18236 enum rtx_code code1, code2, code3;
18237 enum machine_mode submode;
18239 if (CONSTANT_P (op0) && !CONSTANT_P (op1))
18241 tmp = op0, op0 = op1, op1 = tmp;
18242 code = swap_condition (code);
18245 split_double_mode (mode, &op0, 1, lo+0, hi+0);
18246 split_double_mode (mode, &op1, 1, lo+1, hi+1);
18248 submode = mode == DImode ? SImode : DImode;
18250 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
18251 avoid two branches. This costs one extra insn, so disable when
18252 optimizing for size. */
18254 if ((code == EQ || code == NE)
18255 && (!optimize_insn_for_size_p ()
18256 || hi[1] == const0_rtx || lo[1] == const0_rtx))
18261 if (hi[1] != const0_rtx)
18262 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
18263 NULL_RTX, 0, OPTAB_WIDEN);
18266 if (lo[1] != const0_rtx)
18267 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
18268 NULL_RTX, 0, OPTAB_WIDEN);
18270 tmp = expand_binop (submode, ior_optab, xor1, xor0,
18271 NULL_RTX, 0, OPTAB_WIDEN);
18273 ix86_expand_branch (code, tmp, const0_rtx, label);
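/* E.g. (a sketch): a 64-bit "a == b" on ia32 becomes roughly
 *   movl  a_lo, %eax
 *   xorl  b_lo, %eax
 *   movl  a_hi, %edx
 *   xorl  b_hi, %edx
 *   orl   %edx, %eax
 *   jz    .Ltrue
 * trading the second conditional jump for one extra or.  */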
18277 /* Otherwise, if we are doing less-than or greater-or-equal-than,
18278 op1 is a constant, and the low word is zero, then we can just
18279 examine the high word. Similarly for a low word of -1 and
18280 less-or-equal-than or greater-than. */
18282 if (CONST_INT_P (hi[1]))
18285 case LT: case LTU: case GE: case GEU:
18286 if (lo[1] == const0_rtx)
18288 ix86_expand_branch (code, hi[0], hi[1], label);
18292 case LE: case LEU: case GT: case GTU:
18293 if (lo[1] == constm1_rtx)
18295 ix86_expand_branch (code, hi[0], hi[1], label);
18303 /* Otherwise, we need two or three jumps. */
18305 label2 = gen_label_rtx ();
18308 code2 = swap_condition (code);
18309 code3 = unsigned_condition (code);
18313 case LT: case GT: case LTU: case GTU:
18316 case LE: code1 = LT; code2 = GT; break;
18317 case GE: code1 = GT; code2 = LT; break;
18318 case LEU: code1 = LTU; code2 = GTU; break;
18319 case GEU: code1 = GTU; code2 = LTU; break;
18321 case EQ: code1 = UNKNOWN; code2 = NE; break;
18322 case NE: code2 = UNKNOWN; break;
18325 gcc_unreachable ();
18330 * if (hi(a) < hi(b)) goto true;
18331 * if (hi(a) > hi(b)) goto false;
18332 * if (lo(a) < lo(b)) goto true;
18336 if (code1 != UNKNOWN)
18337 ix86_expand_branch (code1, hi[0], hi[1], label);
18338 if (code2 != UNKNOWN)
18339 ix86_expand_branch (code2, hi[0], hi[1], label2);
18341 ix86_expand_branch (code3, lo[0], lo[1], label);
18343 if (code2 != UNKNOWN)
18344 emit_label (label2);
18349 gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC);
18354 /* Split branch based on floating point condition. */
18356 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
18357 rtx target1, rtx target2, rtx tmp, rtx pushed)
18362 if (target2 != pc_rtx)
18365 code = reverse_condition_maybe_unordered (code);
18370 condition = ix86_expand_fp_compare (code, op1, op2,
18373 /* Remove pushed operand from stack. */
18375 ix86_free_from_memory (GET_MODE (pushed));
18377 i = emit_jump_insn (gen_rtx_SET
18379 gen_rtx_IF_THEN_ELSE (VOIDmode,
18380 condition, target1, target2)));
18381 if (split_branch_probability >= 0)
18382 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
18386 ix86_expand_setcc (rtx dest, enum rtx_code code, rtx op0, rtx op1)
18390 gcc_assert (GET_MODE (dest) == QImode);
18392 ret = ix86_expand_compare (code, op0, op1);
18393 PUT_MODE (ret, QImode);
18394 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
18397 /* Expand a comparison setting or clearing the carry flag. Return true when
18398 successful, setting *POP to the comparison for the operation. */
18400 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
18402 enum machine_mode mode =
18403 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
18405 /* Do not handle double-mode compares that go through the special path. */
18406 if (mode == (TARGET_64BIT ? TImode : DImode))
18409 if (SCALAR_FLOAT_MODE_P (mode))
18411 rtx compare_op, compare_seq;
18413 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
18415 /* Shortcut: the following common codes never translate
18416 into carry flag compares. */
18417 if (code == EQ || code == NE || code == UNEQ || code == LTGT
18418 || code == ORDERED || code == UNORDERED)
18421 /* These comparisons require the zero flag; swap operands so they won't. */
18422 if ((code == GT || code == UNLE || code == LE || code == UNGT)
18423 && !TARGET_IEEE_FP)
18428 code = swap_condition (code);
18431 /* Try to expand the comparison and verify that we end up with
18432 a carry flag based comparison. This fails to be true only when
18433 we decide to expand the comparison using arithmetic, which is not
18434 a very common scenario. */
18436 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
18437 compare_seq = get_insns ();
18440 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
18441 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
18442 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
18444 code = GET_CODE (compare_op);
18446 if (code != LTU && code != GEU)
18449 emit_insn (compare_seq);
18454 if (!INTEGRAL_MODE_P (mode))
18463 /* Convert a==0 into (unsigned)a<1. */
18466 if (op1 != const0_rtx)
18469 code = (code == EQ ? LTU : GEU);
18472 /* Convert a>b into b<a or a>=b-1. */
18475 if (CONST_INT_P (op1))
18477 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
18478 /* Bail out on overflow. We still can swap operands but that
18479 would force loading of the constant into register. */
18480 if (op1 == const0_rtx
18481 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
18483 code = (code == GTU ? GEU : LTU);
18490 code = (code == GTU ? LTU : GEU);
18494 /* Convert a>=0 into (unsigned)a<0x80000000. */
18497 if (mode == DImode || op1 != const0_rtx)
18499 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
18500 code = (code == LT ? GEU : LTU);
18504 if (mode == DImode || op1 != constm1_rtx)
18506 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
18507 code = (code == LE ? GEU : LTU);
18513 /* Swapping operands may cause a constant to appear as the first operand. */
18514 if (!nonimmediate_operand (op0, VOIDmode))
18516 if (!can_create_pseudo_p ())
18518 op0 = force_reg (mode, op0);
18520 *pop = ix86_expand_compare (code, op0, op1);
18521 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
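/* Worked example (illustrative): "x >= 0" in SImode is rewritten above
   as (unsigned) x < 0x80000000, i.e.
     cmpl $0x80000000, x
   which sets the carry flag exactly when the sign bit of x is clear,
   so the caller can consume the result directly with adc/sbb.  */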
18526 ix86_expand_int_movcc (rtx operands[])
18528 enum rtx_code code = GET_CODE (operands[1]), compare_code;
18529 rtx compare_seq, compare_op;
18530 enum machine_mode mode = GET_MODE (operands[0]);
18531 bool sign_bit_compare_p = false;
18532 rtx op0 = XEXP (operands[1], 0);
18533 rtx op1 = XEXP (operands[1], 1);
18536 compare_op = ix86_expand_compare (code, op0, op1);
18537 compare_seq = get_insns ();
18540 compare_code = GET_CODE (compare_op);
18542 if ((op1 == const0_rtx && (code == GE || code == LT))
18543 || (op1 == constm1_rtx && (code == GT || code == LE)))
18544 sign_bit_compare_p = true;
18546 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
18547 HImode insns, we'd be swallowed in word prefix ops. */
18549 if ((mode != HImode || TARGET_FAST_PREFIX)
18550 && (mode != (TARGET_64BIT ? TImode : DImode))
18551 && CONST_INT_P (operands[2])
18552 && CONST_INT_P (operands[3]))
18554 rtx out = operands[0];
18555 HOST_WIDE_INT ct = INTVAL (operands[2]);
18556 HOST_WIDE_INT cf = INTVAL (operands[3]);
18557 HOST_WIDE_INT diff;
18560 /* Sign bit compares are better done using shifts than by using the sbb insn. */
18562 if (sign_bit_compare_p
18563 || ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
18565 /* Detect overlap between destination and compare sources. */
18568 if (!sign_bit_compare_p)
18571 bool fpcmp = false;
18573 compare_code = GET_CODE (compare_op);
18575 flags = XEXP (compare_op, 0);
18577 if (GET_MODE (flags) == CCFPmode
18578 || GET_MODE (flags) == CCFPUmode)
18582 = ix86_fp_compare_code_to_integer (compare_code);
18585 /* To simplify the rest of the code, restrict to the GEU case. */
18586 if (compare_code == LTU)
18588 HOST_WIDE_INT tmp = ct;
18591 compare_code = reverse_condition (compare_code);
18592 code = reverse_condition (code);
18597 PUT_CODE (compare_op,
18598 reverse_condition_maybe_unordered
18599 (GET_CODE (compare_op)));
18601 PUT_CODE (compare_op,
18602 reverse_condition (GET_CODE (compare_op)));
18606 if (reg_overlap_mentioned_p (out, op0)
18607 || reg_overlap_mentioned_p (out, op1))
18608 tmp = gen_reg_rtx (mode);
18610 if (mode == DImode)
18611 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
18613 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
18614 flags, compare_op));
18618 if (code == GT || code == GE)
18619 code = reverse_condition (code);
18622 HOST_WIDE_INT tmp = ct;
18627 tmp = emit_store_flag (tmp, code, op0, op1, VOIDmode, 0, -1);
18640 tmp = expand_simple_binop (mode, PLUS,
18642 copy_rtx (tmp), 1, OPTAB_DIRECT);
18653 tmp = expand_simple_binop (mode, IOR,
18655 copy_rtx (tmp), 1, OPTAB_DIRECT);
18657 else if (diff == -1 && ct)
18667 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
18669 tmp = expand_simple_binop (mode, PLUS,
18670 copy_rtx (tmp), GEN_INT (cf),
18671 copy_rtx (tmp), 1, OPTAB_DIRECT);
18679 * andl cf - ct, dest
18689 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
18692 tmp = expand_simple_binop (mode, AND,
18694 gen_int_mode (cf - ct, mode),
18695 copy_rtx (tmp), 1, OPTAB_DIRECT);
18697 tmp = expand_simple_binop (mode, PLUS,
18698 copy_rtx (tmp), GEN_INT (ct),
18699 copy_rtx (tmp), 1, OPTAB_DIRECT);
18702 if (!rtx_equal_p (tmp, out))
18703 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
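/* The branch-free shape produced by the block above (a sketch):
 *   cmpl  op1, op0
 *   sbbl  dest, dest          dest = CF ? -1 : 0
 *   andl  (cf - ct), dest
 *   addl  ct, dest            dest = CF ? cf : ct
 */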
18710 enum machine_mode cmp_mode = GET_MODE (op0);
18713 tmp = ct, ct = cf, cf = tmp;
18716 if (SCALAR_FLOAT_MODE_P (cmp_mode))
18718 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
18720 /* We may be reversing an unordered compare to a normal compare, which
18721 is not valid in general (we may convert a non-trapping condition
18722 to a trapping one); however, on i386 we currently emit all
18723 comparisons unordered. */
18724 compare_code = reverse_condition_maybe_unordered (compare_code);
18725 code = reverse_condition_maybe_unordered (code);
18729 compare_code = reverse_condition (compare_code);
18730 code = reverse_condition (code);
18734 compare_code = UNKNOWN;
18735 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
18736 && CONST_INT_P (op1))
18738 if (op1 == const0_rtx
18739 && (code == LT || code == GE))
18740 compare_code = code;
18741 else if (op1 == constm1_rtx)
18745 else if (code == GT)
18750 /* Optimize dest = (op0 < 0) ? -1 : cf. */
18751 if (compare_code != UNKNOWN
18752 && GET_MODE (op0) == GET_MODE (out)
18753 && (cf == -1 || ct == -1))
18755 /* If lea code below could be used, only optimize
18756 if it results in a 2 insn sequence. */
18758 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
18759 || diff == 3 || diff == 5 || diff == 9)
18760 || (compare_code == LT && ct == -1)
18761 || (compare_code == GE && cf == -1))
18764 * notl op1 (if necessary)
18772 code = reverse_condition (code);
18775 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18777 out = expand_simple_binop (mode, IOR,
18779 out, 1, OPTAB_DIRECT);
18780 if (out != operands[0])
18781 emit_move_insn (operands[0], out);
18788 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
18789 || diff == 3 || diff == 5 || diff == 9)
18790 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
18792 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
18798 * lea cf(dest*(ct-cf)),dest
18802 * This also catches the degenerate setcc-only case.
18808 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
18811 /* On x86_64 the lea instruction operates on Pmode, so we need
18812 to get the arithmetic done in the proper mode to match. */
18814 tmp = copy_rtx (out);
18818 out1 = copy_rtx (out);
18819 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
18823 tmp = gen_rtx_PLUS (mode, tmp, out1);
18829 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
18832 if (!rtx_equal_p (tmp, out))
18835 out = force_operand (tmp, copy_rtx (out));
18837 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
18839 if (!rtx_equal_p (out, operands[0]))
18840 emit_move_insn (operands[0], copy_rtx (out));
18846 * General case: Jumpful:
18847 * xorl dest,dest cmpl op1, op2
18848 * cmpl op1, op2 movl ct, dest
18849 * setcc dest jcc 1f
18850 * decl dest movl cf, dest
18851 * andl (cf-ct),dest 1:
18854 * Size 20. Size 14.
18856 * This is reasonably steep, but branch mispredict costs are
18857 * high on modern cpus, so consider failing only if optimizing for space.
18861 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
18862 && BRANCH_COST (optimize_insn_for_speed_p (),
18867 enum machine_mode cmp_mode = GET_MODE (op0);
18872 if (SCALAR_FLOAT_MODE_P (cmp_mode))
18874 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
18876 /* We may be reversing an unordered compare to a normal compare,
18877 which is not valid in general (we may convert a non-trapping
18878 condition to a trapping one); however, on i386 we currently
18879 emit all comparisons unordered. */
18880 code = reverse_condition_maybe_unordered (code);
18884 code = reverse_condition (code);
18885 if (compare_code != UNKNOWN)
18886 compare_code = reverse_condition (compare_code);
18890 if (compare_code != UNKNOWN)
18892 /* notl op1 (if needed)
18897 For x < 0 (resp. x <= -1) there will be no notl,
18898 so if possible swap the constants to get rid of the complement.
18900 True/false will be -1/0 while code below (store flag
18901 followed by decrement) is 0/-1, so the constants need
18902 to be exchanged once more. */
18904 if (compare_code == GE || !cf)
18906 code = reverse_condition (code);
18911 HOST_WIDE_INT tmp = cf;
18916 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18920 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
18922 out = expand_simple_binop (mode, PLUS, copy_rtx (out),
18924 copy_rtx (out), 1, OPTAB_DIRECT);
18927 out = expand_simple_binop (mode, AND, copy_rtx (out),
18928 gen_int_mode (cf - ct, mode),
18929 copy_rtx (out), 1, OPTAB_DIRECT);
18931 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
18932 copy_rtx (out), 1, OPTAB_DIRECT);
18933 if (!rtx_equal_p (out, operands[0]))
18934 emit_move_insn (operands[0], copy_rtx (out));
18940 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
18942 /* Try a few things more with specific constants and a variable. */
18945 rtx var, orig_out, out, tmp;
18947 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
18950 /* If one of the two operands is an interesting constant, load a
18951 constant with the above and mask it in with a logical operation. */
18953 if (CONST_INT_P (operands[2]))
18956 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
18957 operands[3] = constm1_rtx, op = and_optab;
18958 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
18959 operands[3] = const0_rtx, op = ior_optab;
18963 else if (CONST_INT_P (operands[3]))
18966 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
18967 operands[2] = constm1_rtx, op = and_optab;
18968 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
18969 operands[2] = const0_rtx, op = ior_optab;
18976 orig_out = operands[0];
18977 tmp = gen_reg_rtx (mode);
18980 /* Recurse to get the constant loaded. */
18981 if (ix86_expand_int_movcc (operands) == 0)
18984 /* Mask in the interesting variable. */
18985 out = expand_binop (mode, op, var, tmp, orig_out, 0,
18987 if (!rtx_equal_p (out, orig_out))
18988 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
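/* Worked example (illustrative): for "dest = cond ? var : 0" the
   recursion above loads tmp = cond ? -1 : 0 and the variable is then
   masked in, dest = tmp & var; dually, "dest = cond ? var : -1" uses
   IOR with tmp = cond ? 0 : -1.  */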
18994 * For comparison with above,
19004 if (! nonimmediate_operand (operands[2], mode))
19005 operands[2] = force_reg (mode, operands[2]);
19006 if (! nonimmediate_operand (operands[3], mode))
19007 operands[3] = force_reg (mode, operands[3]);
19009 if (! register_operand (operands[2], VOIDmode)
19011 || ! register_operand (operands[3], VOIDmode)))
19012 operands[2] = force_reg (mode, operands[2]);
19015 && ! register_operand (operands[3], VOIDmode))
19016 operands[3] = force_reg (mode, operands[3]);
19018 emit_insn (compare_seq);
19019 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
19020 gen_rtx_IF_THEN_ELSE (mode,
19021 compare_op, operands[2],
19026 /* Swap, force into registers, or otherwise massage the two operands
19027 to an sse comparison with a mask result. Thus we differ a bit from
19028 ix86_prepare_fp_compare_args which expects to produce a flags result.
19030 The DEST operand exists to help determine whether to commute commutative
19031 operators. The POP0/POP1 operands are updated in place. The new
19032 comparison code is returned, or UNKNOWN if not implementable. */
19034 static enum rtx_code
19035 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
19036 rtx *pop0, rtx *pop1)
19044 /* AVX supports all the needed comparisons. */
19047 /* We have no LTGT as an operator. We could implement it with
19048 NE & ORDERED, but this requires an extra temporary. It's
19049 not clear that it's worth it. */
19056 /* These are supported directly. */
19063 /* AVX has 3 operand comparisons, no need to swap anything. */
19066 /* For commutative operators, try to canonicalize the destination
19067 operand to be first in the comparison - this helps reload to
19068 avoid extra moves. */
19069 if (!dest || !rtx_equal_p (dest, *pop1))
19077 /* These are not supported directly before AVX, and furthermore
19078 ix86_expand_sse_fp_minmax only optimizes LT/UNGE. Swap the
19079 comparison operands to transform into something that is supported.
19084 code = swap_condition (code);
19088 gcc_unreachable ();
19094 /* Detect conditional moves that exactly match min/max operational
19095 semantics. Note that this is IEEE safe, as long as we don't
19096 interchange the operands.
19098 Returns FALSE if this conditional move doesn't match a MIN/MAX,
19099 and TRUE if the operation is successful and instructions are emitted. */
19102 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
19103 rtx cmp_op1, rtx if_true, rtx if_false)
19105 enum machine_mode mode;
19111 else if (code == UNGE)
19114 if_true = if_false;
19120 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
19122 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
19127 mode = GET_MODE (dest);
19129 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
19130 but MODE may be a vector mode and thus not appropriate. */
19131 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
19133 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
19136 if_true = force_reg (mode, if_true);
19137 v = gen_rtvec (2, if_true, if_false);
19138 tmp = gen_rtx_UNSPEC (mode, v, u);
19142 code = is_min ? SMIN : SMAX;
19143 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
19146 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
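/* Background (informal): the SSE min/max instructions compute
     minps: a < b ? a : b      maxps: a > b ? a : b
   and return the second operand for NaNs and for +0.0/-0.0 pairs, so
   the operand order fixed above is significant; that is what "IEEE
   safe, as long as we don't interchange the operands" refers to.  */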
19150 /* Expand an sse vector comparison. Return the register with the result. */
19153 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
19154 rtx op_true, rtx op_false)
19156 enum machine_mode mode = GET_MODE (dest);
19157 enum machine_mode cmp_mode = GET_MODE (cmp_op0);
19160 cmp_op0 = force_reg (cmp_mode, cmp_op0);
19161 if (!nonimmediate_operand (cmp_op1, cmp_mode))
19162 cmp_op1 = force_reg (cmp_mode, cmp_op1);
19165 || reg_overlap_mentioned_p (dest, op_true)
19166 || reg_overlap_mentioned_p (dest, op_false))
19167 dest = gen_reg_rtx (mode);
19169 x = gen_rtx_fmt_ee (code, cmp_mode, cmp_op0, cmp_op1);
19170 if (cmp_mode != mode)
19172 x = force_reg (cmp_mode, x);
19173 convert_move (dest, x, false);
19176 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
19181 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
19182 operations. This is used for both scalar and vector conditional moves. */
19185 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
19187 enum machine_mode mode = GET_MODE (dest);
19190 if (vector_all_ones_operand (op_true, mode)
19191 && rtx_equal_p (op_false, CONST0_RTX (mode)))
19193 emit_insn (gen_rtx_SET (VOIDmode, dest, cmp));
19195 else if (op_false == CONST0_RTX (mode))
19197 op_true = force_reg (mode, op_true);
19198 x = gen_rtx_AND (mode, cmp, op_true);
19199 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
19201 else if (op_true == CONST0_RTX (mode))
19203 op_false = force_reg (mode, op_false);
19204 x = gen_rtx_NOT (mode, cmp);
19205 x = gen_rtx_AND (mode, x, op_false);
19206 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
19208 else if (INTEGRAL_MODE_P (mode) && op_true == CONSTM1_RTX (mode))
19210 op_false = force_reg (mode, op_false);
19211 x = gen_rtx_IOR (mode, cmp, op_false);
19212 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
19214 else if (TARGET_XOP)
19216 op_true = force_reg (mode, op_true);
19218 if (!nonimmediate_operand (op_false, mode))
19219 op_false = force_reg (mode, op_false);
19221 emit_insn (gen_rtx_SET (mode, dest,
19222 gen_rtx_IF_THEN_ELSE (mode, cmp,
19228 rtx (*gen) (rtx, rtx, rtx, rtx) = NULL;
19230 if (!nonimmediate_operand (op_true, mode))
19231 op_true = force_reg (mode, op_true);
19233 op_false = force_reg (mode, op_false);
19239 gen = gen_sse4_1_blendvps;
19243 gen = gen_sse4_1_blendvpd;
19251 gen = gen_sse4_1_pblendvb;
19252 dest = gen_lowpart (V16QImode, dest);
19253 op_false = gen_lowpart (V16QImode, op_false);
19254 op_true = gen_lowpart (V16QImode, op_true);
19255 cmp = gen_lowpart (V16QImode, cmp);
19260 gen = gen_avx_blendvps256;
19264 gen = gen_avx_blendvpd256;
19272 gen = gen_avx2_pblendvb;
19273 dest = gen_lowpart (V32QImode, dest);
19274 op_false = gen_lowpart (V32QImode, op_false);
19275 op_true = gen_lowpart (V32QImode, op_true);
19276 cmp = gen_lowpart (V32QImode, cmp);
19284 emit_insn (gen (dest, op_false, op_true, cmp));
19287 op_true = force_reg (mode, op_true);
19289 t2 = gen_reg_rtx (mode);
19291 t3 = gen_reg_rtx (mode);
19295 x = gen_rtx_AND (mode, op_true, cmp);
19296 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
19298 x = gen_rtx_NOT (mode, cmp);
19299 x = gen_rtx_AND (mode, x, op_false);
19300 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
19302 x = gen_rtx_IOR (mode, t3, t2);
19303 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
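/* I.e. the generic fallback computes
     dest = (op_true & cmp) | (op_false & ~cmp)
   with one AND, one ANDN and one IOR.  */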
19308 /* Expand a floating-point conditional move. Return true if successful. */
19311 ix86_expand_fp_movcc (rtx operands[])
19313 enum machine_mode mode = GET_MODE (operands[0]);
19314 enum rtx_code code = GET_CODE (operands[1]);
19315 rtx tmp, compare_op;
19316 rtx op0 = XEXP (operands[1], 0);
19317 rtx op1 = XEXP (operands[1], 1);
19319 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
19321 enum machine_mode cmode;
19323 /* Since we've no cmove for sse registers, don't force bad register
19324 allocation just to gain access to it. Deny movcc when the
19325 comparison mode doesn't match the move mode. */
19326 cmode = GET_MODE (op0);
19327 if (cmode == VOIDmode)
19328 cmode = GET_MODE (op1);
19332 code = ix86_prepare_sse_fp_compare_args (operands[0], code, &op0, &op1);
19333 if (code == UNKNOWN)
19336 if (ix86_expand_sse_fp_minmax (operands[0], code, op0, op1,
19337 operands[2], operands[3]))
19340 tmp = ix86_expand_sse_cmp (operands[0], code, op0, op1,
19341 operands[2], operands[3]);
19342 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
19346 /* The floating point conditional move instructions don't directly
19347 support conditions resulting from a signed integer comparison. */
19349 compare_op = ix86_expand_compare (code, op0, op1);
19350 if (!fcmov_comparison_operator (compare_op, VOIDmode))
19352 tmp = gen_reg_rtx (QImode);
19353 ix86_expand_setcc (tmp, code, op0, op1);
19355 compare_op = ix86_expand_compare (NE, tmp, const0_rtx);
19358 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
19359 gen_rtx_IF_THEN_ELSE (mode, compare_op,
19360 operands[2], operands[3])));
19365 /* Expand a floating-point vector conditional move; a vcond operation
19366 rather than a movcc operation. */
19369 ix86_expand_fp_vcond (rtx operands[])
19371 enum rtx_code code = GET_CODE (operands[3]);
19374 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
19375 &operands[4], &operands[5]);
19376 if (code == UNKNOWN)
19379 switch (GET_CODE (operands[3]))
19382 temp = ix86_expand_sse_cmp (operands[0], ORDERED, operands[4],
19383 operands[5], operands[0], operands[0]);
19384 cmp = ix86_expand_sse_cmp (operands[0], NE, operands[4],
19385 operands[5], operands[1], operands[2]);
19389 temp = ix86_expand_sse_cmp (operands[0], UNORDERED, operands[4],
19390 operands[5], operands[0], operands[0]);
19391 cmp = ix86_expand_sse_cmp (operands[0], EQ, operands[4],
19392 operands[5], operands[1], operands[2]);
19396 gcc_unreachable ();
19398 cmp = expand_simple_binop (GET_MODE (cmp), code, temp, cmp, cmp, 1,
19400 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
19404 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
19405 operands[5], operands[1], operands[2]))
19408 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
19409 operands[1], operands[2]);
19410 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
19414 /* Expand a signed/unsigned integral vector conditional move. */
19417 ix86_expand_int_vcond (rtx operands[])
19419 enum machine_mode data_mode = GET_MODE (operands[0]);
19420 enum machine_mode mode = GET_MODE (operands[4]);
19421 enum rtx_code code = GET_CODE (operands[3]);
19422 bool negate = false;
19425 cop0 = operands[4];
19426 cop1 = operands[5];
19428 /* XOP supports all of the comparisons on all vector int types. */
19431 /* Canonicalize the comparison to EQ, GT, GTU. */
19442 code = reverse_condition (code);
19448 code = reverse_condition (code);
19454 code = swap_condition (code);
19455 x = cop0, cop0 = cop1, cop1 = x;
19459 gcc_unreachable ();
19462 /* Only SSE4.1/SSE4.2 supports V2DImode. */
19463 if (mode == V2DImode)
19468 /* SSE4.1 supports EQ. */
19469 if (!TARGET_SSE4_1)
19475 /* SSE4.2 supports GT/GTU. */
19476 if (!TARGET_SSE4_2)
19481 gcc_unreachable ();
19485 /* Unsigned parallel compare is not supported by the hardware.
19486 Play some tricks to turn this into a signed comparison against zero.
19490 cop0 = force_reg (mode, cop0);
19500 rtx (*gen_sub3) (rtx, rtx, rtx);
19504 case V8SImode: gen_sub3 = gen_subv8si3; break;
19505 case V4DImode: gen_sub3 = gen_subv4di3; break;
19506 case V4SImode: gen_sub3 = gen_subv4si3; break;
19507 case V2DImode: gen_sub3 = gen_subv2di3; break;
19509 gcc_unreachable ();
19511 /* Subtract (-(INT MAX) - 1) from both operands to make them signed. */
19513 mask = ix86_build_signbit_mask (mode, true, false);
19514 t1 = gen_reg_rtx (mode);
19515 emit_insn (gen_sub3 (t1, cop0, mask));
19517 t2 = gen_reg_rtx (mode);
19518 emit_insn (gen_sub3 (t2, cop1, mask));
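/* Worked example (illustrative): there is no unsigned pcmpgt, but for
   32-bit elements
     a >u b  <==>  (a - 0x80000000) >s (b - 0x80000000)
   e.g. 0xffffffff >u 1 becomes 0x7fffffff >s 0x80000001, which holds
   as signed.  Subtracting the sign-bit mask only flips the sign bit,
   so it is equivalent to an XOR.  */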
19530 /* Perform a parallel unsigned saturating subtraction. */
19531 x = gen_reg_rtx (mode);
19532 emit_insn (gen_rtx_SET (VOIDmode, x,
19533 gen_rtx_US_MINUS (mode, cop0, cop1)));
19536 cop1 = CONST0_RTX (mode);
19542 gcc_unreachable ();
19547 /* Allow the comparison to be done in one mode, but the movcc to
19548 happen in another mode. */
19549 if (data_mode == mode)
19551 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
19552 operands[1+negate], operands[2-negate]);
19556 gcc_assert (GET_MODE_SIZE (data_mode) == GET_MODE_SIZE (mode));
19557 x = ix86_expand_sse_cmp (gen_lowpart (mode, operands[0]),
19559 operands[1+negate], operands[2-negate]);
19560 x = gen_lowpart (data_mode, x);
19563 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
19564 operands[2-negate]);
19568 /* Expand a variable vector permutation. */
19571 ix86_expand_vec_perm (rtx operands[])
19573 rtx target = operands[0];
19574 rtx op0 = operands[1];
19575 rtx op1 = operands[2];
19576 rtx mask = operands[3];
19577 rtx t1, t2, t3, t4, vt, vt2, vec[32];
19578 enum machine_mode mode = GET_MODE (op0);
19579 enum machine_mode maskmode = GET_MODE (mask);
19581 bool one_operand_shuffle = rtx_equal_p (op0, op1);
19583 /* Number of elements in the vector. */
19584 w = GET_MODE_NUNITS (mode);
19585 e = GET_MODE_UNIT_SIZE (mode);
19586 gcc_assert (w <= 32);
19590 if (mode == V4DImode || mode == V4DFmode || mode == V16HImode)
19592 /* Unfortunately, the VPERMQ and VPERMPD instructions only support
19593 a constant shuffle operand. With a tiny bit of effort we can
19594 use VPERMD instead. A re-interpretation stall for V4DFmode is
19595 unfortunate but there's no avoiding it.
19596 Similarly, for V16HImode we don't have instructions for variable
19597 shuffling, while for V32QImode we can use vpshufb; vpshufb; vpermq; vpor
19598 after preparing suitable masks. */
19600 if (mode == V16HImode)
19602 maskmode = mode = V32QImode;
19608 maskmode = mode = V8SImode;
19612 t1 = gen_reg_rtx (maskmode);
19614 /* Replicate the low bits of the V4DImode mask into V8SImode:
19616 t1 = { A A B B C C D D }. */
19617 for (i = 0; i < w / 2; ++i)
19618 vec[i*2 + 1] = vec[i*2] = GEN_INT (i * 2);
19619 vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
19620 vt = force_reg (maskmode, vt);
19621 mask = gen_lowpart (maskmode, mask);
19622 if (maskmode == V8SImode)
19623 emit_insn (gen_avx2_permvarv8si (t1, vt, mask));
19625 emit_insn (gen_avx2_pshufbv32qi3 (t1, mask, vt));
19627 /* Multiply the shuffle indices by two. */
19628 t1 = expand_simple_binop (maskmode, PLUS, t1, t1, t1, 1,
19631 /* Add one to the odd shuffle indices:
19632 t1 = { A*2, A*2+1, B*2, B*2+1, ... }. */
19633 for (i = 0; i < w / 2; ++i)
19635 vec[i * 2] = const0_rtx;
19636 vec[i * 2 + 1] = const1_rtx;
19638 vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
19639 vt = force_const_mem (maskmode, vt);
19640 t1 = expand_simple_binop (maskmode, PLUS, t1, vt, t1, 1,
19643 /* Continue as if V8SImode (resp. V32QImode) was used initially. */
19644 operands[3] = mask = t1;
19645 target = gen_lowpart (mode, target);
19646 op0 = gen_lowpart (mode, op0);
19647 op1 = gen_lowpart (mode, op1);
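/* Worked example (illustrative): a V4DI shuffle mask {3,0,2,1} becomes
   the V8SI mask
     {3,3,0,0,2,2,1,1} * 2 + {0,1,0,1,0,1,0,1} = {6,7,0,1,4,5,2,3}
   picking the two 32-bit halves of each requested 64-bit element.  */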
19653 /* The VPERMD and VPERMPS instructions already properly ignore
19654 the high bits of the shuffle elements. No need for us to
19655 perform an AND ourselves. */
19656 if (one_operand_shuffle)
19657 emit_insn (gen_avx2_permvarv8si (target, mask, op0));
19660 t1 = gen_reg_rtx (V8SImode);
19661 t2 = gen_reg_rtx (V8SImode);
19662 emit_insn (gen_avx2_permvarv8si (t1, mask, op0));
19663 emit_insn (gen_avx2_permvarv8si (t2, mask, op1));
19669 mask = gen_lowpart (V8SFmode, mask);
19670 if (one_operand_shuffle)
19671 emit_insn (gen_avx2_permvarv8sf (target, mask, op0));
19674 t1 = gen_reg_rtx (V8SFmode);
19675 t2 = gen_reg_rtx (V8SFmode);
19676 emit_insn (gen_avx2_permvarv8sf (t1, mask, op0));
19677 emit_insn (gen_avx2_permvarv8sf (t2, mask, op1));
19683 /* By combining the two 128-bit input vectors into one 256-bit
19684 input vector, we can use VPERMD and VPERMPS for the full
19685 two-operand shuffle. */
19686 t1 = gen_reg_rtx (V8SImode);
19687 t2 = gen_reg_rtx (V8SImode);
19688 emit_insn (gen_avx_vec_concatv8si (t1, op0, op1));
19689 emit_insn (gen_avx_vec_concatv8si (t2, mask, mask));
19690 emit_insn (gen_avx2_permvarv8si (t1, t2, t1));
19691 emit_insn (gen_avx_vextractf128v8si (target, t1, const0_rtx));
19695 t1 = gen_reg_rtx (V8SFmode);
19696 t2 = gen_reg_rtx (V8SFmode);
19697 mask = gen_lowpart (V4SFmode, mask);
19698 emit_insn (gen_avx_vec_concatv8sf (t1, op0, op1));
19699 emit_insn (gen_avx_vec_concatv8sf (t2, mask, mask));
19700 emit_insn (gen_avx2_permvarv8sf (t1, t2, t1));
19701 emit_insn (gen_avx_vextractf128v8sf (target, t1, const0_rtx));
19705 t1 = gen_reg_rtx (V32QImode);
19706 t2 = gen_reg_rtx (V32QImode);
19707 t3 = gen_reg_rtx (V32QImode);
19708 vt2 = GEN_INT (128);
19709 for (i = 0; i < 32; i++)
19711 vt = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, vec));
19712 vt = force_reg (V32QImode, vt);
19713 for (i = 0; i < 32; i++)
19714 vec[i] = i < 16 ? vt2 : const0_rtx;
19715 vt2 = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, vec));
19716 vt2 = force_reg (V32QImode, vt2);
19717 /* From mask create two adjusted masks, which contain the same
19718 bits as mask in the low 7 bits of each vector element.
19719 The first mask will have the most significant bit clear
19720 if it requests element from the same 128-bit lane
19721 and MSB set if it requests element from the other 128-bit lane.
19722 The second mask will have the opposite values of the MSB,
19723 and additionally will have its 128-bit lanes swapped.
19724 E.g. { 07 12 1e 09 ... | 17 19 05 1f ... } mask vector will have
19725 t1 { 07 92 9e 09 ... | 17 19 85 1f ... } and
19726 t3 { 97 99 05 9f ... | 87 12 1e 89 ... } where each ...
19727 stands for the other 12 bytes. */
19728 /* The bit telling whether an element is from the same lane or the other
19729 lane is bit 4, so shift it up by 3 to the MSB position. */
19730 emit_insn (gen_ashlv4di3 (gen_lowpart (V4DImode, t1),
19731 gen_lowpart (V4DImode, mask),
19733 /* Clear MSB bits from the mask just in case it had them set. */
19734 emit_insn (gen_avx2_andnotv32qi3 (t2, vt, mask));
19735 /* After this t1 will have MSB set for elements from other lane. */
19736 emit_insn (gen_xorv32qi3 (t1, t1, vt2));
19737 /* Clear bits other than MSB. */
19738 emit_insn (gen_andv32qi3 (t1, t1, vt));
19739 /* Or in the lower bits from mask into t3. */
19740 emit_insn (gen_iorv32qi3 (t3, t1, t2));
19741 /* And invert the MSB bits in t1, so the MSB is set for elements from the same lane. */
19743 emit_insn (gen_xorv32qi3 (t1, t1, vt));
19744 /* Swap 128-bit lanes in t3. */
19745 emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t3),
19746 gen_lowpart (V4DImode, t3),
19747 const2_rtx, GEN_INT (3),
19748 const0_rtx, const1_rtx));
19749 /* And or in the lower bits from mask into t1. */
19750 emit_insn (gen_iorv32qi3 (t1, t1, t2));
19751 if (one_operand_shuffle)
19753 /* Each of these shuffles will put 0s in places where
19754 element from the other 128-bit lane is needed, otherwise
19755 will shuffle in the requested value. */
19756 emit_insn (gen_avx2_pshufbv32qi3 (t3, op0, t3));
19757 emit_insn (gen_avx2_pshufbv32qi3 (t1, op0, t1));
19758 /* For t3 the 128-bit lanes are swapped again. */
19759 emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t3),
19760 gen_lowpart (V4DImode, t3),
19761 const2_rtx, GEN_INT (3),
19762 const0_rtx, const1_rtx));
19763 /* ORing both together yields the result. */
19764 emit_insn (gen_iorv32qi3 (target, t1, t3));
19768 t4 = gen_reg_rtx (V32QImode);
19769 /* Similar to the one_operand_shuffle code above,
19770 just repeated twice, once for each operand. The merge_two:
19771 code will merge the two results together. */
19772 emit_insn (gen_avx2_pshufbv32qi3 (t4, op0, t3));
19773 emit_insn (gen_avx2_pshufbv32qi3 (t3, op1, t3));
19774 emit_insn (gen_avx2_pshufbv32qi3 (t2, op0, t1));
19775 emit_insn (gen_avx2_pshufbv32qi3 (t1, op1, t1));
19776 emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t4),
19777 gen_lowpart (V4DImode, t4),
19778 const2_rtx, GEN_INT (3),
19779 const0_rtx, const1_rtx));
19780 emit_insn (gen_avx2_permv4di_1 (gen_lowpart (V4DImode, t3),
19781 gen_lowpart (V4DImode, t3),
19782 const2_rtx, GEN_INT (3),
19783 const0_rtx, const1_rtx));
19784 emit_insn (gen_iorv32qi3 (t4, t2, t4));
19785 emit_insn (gen_iorv32qi3 (t3, t1, t3));
19791 gcc_assert (GET_MODE_SIZE (mode) <= 16);
19798 /* The XOP VPPERM insn supports three inputs. By ignoring the
19799 one_operand_shuffle special case, we avoid creating another
19800 set of constant vectors in memory. */
19801 one_operand_shuffle = false;
19803 /* mask = mask & {2*w-1, ...} */
19804 vt = GEN_INT (2*w - 1);
19808 /* mask = mask & {w-1, ...} */
19809 vt = GEN_INT (w - 1);
19812 for (i = 0; i < w; i++)
19814 vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
19815 mask = expand_simple_binop (maskmode, AND, mask, vt,
19816 NULL_RTX, 0, OPTAB_DIRECT);
19818 /* For non-QImode operations, convert the word permutation control
19819 into a byte permutation control. */
19820 if (mode != V16QImode)
19822 mask = expand_simple_binop (maskmode, ASHIFT, mask,
19823 GEN_INT (exact_log2 (e)),
19824 NULL_RTX, 0, OPTAB_DIRECT);
19826 /* Convert mask to vector of chars. */
19827 mask = force_reg (V16QImode, gen_lowpart (V16QImode, mask));
19829 /* Replicate each of the input bytes into byte positions:
19830 (v2di) --> {0,0,0,0,0,0,0,0, 8,8,8,8,8,8,8,8}
19831 (v4si) --> {0,0,0,0, 4,4,4,4, 8,8,8,8, 12,12,12,12}
19832 (v8hi) --> {0,0, 2,2, 4,4, 6,6, ...}. */
19833 for (i = 0; i < 16; ++i)
19834 vec[i] = GEN_INT (i/e * e);
19835 vt = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, vec));
19836 vt = force_const_mem (V16QImode, vt);
19838 emit_insn (gen_xop_pperm (mask, mask, mask, vt));
19840 emit_insn (gen_ssse3_pshufbv16qi3 (mask, mask, vt));
19842 /* Convert it into the byte positions by doing
19843 mask = mask + {0,1,..,16/w, 0,1,..,16/w, ...} */
19844 for (i = 0; i < 16; ++i)
19845 vec[i] = GEN_INT (i % e);
19846 vt = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, vec));
19847 vt = force_const_mem (V16QImode, vt);
19848 emit_insn (gen_addv16qi3 (mask, mask, vt));
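/* Worked example (illustrative): for a V4SI permutation (w = 4, e = 4)
   a word index k must become the byte controls {4k, 4k+1, 4k+2, 4k+3}:
   the shift by log2(e) yields 4k, the shuffle replicates it into all
   four byte positions of the element, and the final add contributes
   {0,1,2,3}.  A mask element 2 thus selects bytes {8,9,10,11}.  */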
19851 /* The actual shuffle operations all operate on V16QImode. */
19852 op0 = gen_lowpart (V16QImode, op0);
19853 op1 = gen_lowpart (V16QImode, op1);
19854 target = gen_lowpart (V16QImode, target);
19858 emit_insn (gen_xop_pperm (target, op0, op1, mask));
19860 else if (one_operand_shuffle)
19862 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, mask));
19869 /* Shuffle the two input vectors independently. */
19870 t1 = gen_reg_rtx (V16QImode);
19871 t2 = gen_reg_rtx (V16QImode);
19872 emit_insn (gen_ssse3_pshufbv16qi3 (t1, op0, mask));
19873 emit_insn (gen_ssse3_pshufbv16qi3 (t2, op1, mask));
19876 /* Then merge them together. The key is whether any given control
19877 element contained a bit set that indicates the second word. */
19878 mask = operands[3];
19880 if (maskmode == V2DImode && !TARGET_SSE4_1)
19882 /* Without SSE4.1, we don't have V2DImode EQ. Perform one
19883 more shuffle to convert the V2DI input mask into a V4SI
19884 input mask, at which point the masking that expand_int_vcond
19885 performs will work as desired. */
19886 rtx t3 = gen_reg_rtx (V4SImode);
19887 emit_insn (gen_sse2_pshufd_1 (t3, gen_lowpart (V4SImode, mask),
19888 const0_rtx, const0_rtx,
19889 const2_rtx, const2_rtx));
19891 maskmode = V4SImode;
19895 for (i = 0; i < w; i++)
19897 vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
19898 vt = force_reg (maskmode, vt);
19899 mask = expand_simple_binop (maskmode, AND, mask, vt,
19900 NULL_RTX, 0, OPTAB_DIRECT);
19902 xops[0] = gen_lowpart (mode, operands[0]);
19903 xops[1] = gen_lowpart (mode, t2);
19904 xops[2] = gen_lowpart (mode, t1);
19905 xops[3] = gen_rtx_EQ (maskmode, mask, vt);
19908 ok = ix86_expand_int_vcond (xops);
19913 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
19914 true if we should do zero extension, else sign extension. HIGH_P is
19915 true if we want the N/2 high elements, else the low elements. */
19918 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
19920 enum machine_mode imode = GET_MODE (operands[1]);
19925 rtx (*unpack)(rtx, rtx);
19926 rtx (*extract)(rtx, rtx) = NULL;
19927 enum machine_mode halfmode = BLKmode;
19933 unpack = gen_avx2_zero_extendv16qiv16hi2;
19935 unpack = gen_avx2_sign_extendv16qiv16hi2;
19936 halfmode = V16QImode;
19938 = high_p ? gen_vec_extract_hi_v32qi : gen_vec_extract_lo_v32qi;
19942 unpack = gen_avx2_zero_extendv8hiv8si2;
19944 unpack = gen_avx2_sign_extendv8hiv8si2;
19945 halfmode = V8HImode;
19947 = high_p ? gen_vec_extract_hi_v16hi : gen_vec_extract_lo_v16hi;
19951 unpack = gen_avx2_zero_extendv4siv4di2;
19953 unpack = gen_avx2_sign_extendv4siv4di2;
19954 halfmode = V4SImode;
19956 = high_p ? gen_vec_extract_hi_v8si : gen_vec_extract_lo_v8si;
19960 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
19962 unpack = gen_sse4_1_sign_extendv8qiv8hi2;
19966 unpack = gen_sse4_1_zero_extendv4hiv4si2;
19968 unpack = gen_sse4_1_sign_extendv4hiv4si2;
19972 unpack = gen_sse4_1_zero_extendv2siv2di2;
19974 unpack = gen_sse4_1_sign_extendv2siv2di2;
19977 gcc_unreachable ();
19980 if (GET_MODE_SIZE (imode) == 32)
19982 tmp = gen_reg_rtx (halfmode);
19983 emit_insn (extract (tmp, operands[1]));
19987 /* Shift the higher 8 bytes into the lower 8 bytes. */
19988 tmp = gen_reg_rtx (imode);
19989 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, tmp),
19990 gen_lowpart (V1TImode, operands[1]),
19996 emit_insn (unpack (operands[0], tmp));
20000 rtx (*unpack)(rtx, rtx, rtx);
20006 unpack = gen_vec_interleave_highv16qi;
20008 unpack = gen_vec_interleave_lowv16qi;
20012 unpack = gen_vec_interleave_highv8hi;
20014 unpack = gen_vec_interleave_lowv8hi;
20018 unpack = gen_vec_interleave_highv4si;
20020 unpack = gen_vec_interleave_lowv4si;
20023 gcc_unreachable ();
20026 dest = gen_lowpart (imode, operands[0]);
20029 tmp = force_reg (imode, CONST0_RTX (imode));
20031 tmp = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
20032 operands[1], pc_rtx, pc_rtx);
20034 emit_insn (unpack (dest, operands[1], tmp));
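/* For illustration (a hypothetical helper, not used in this file): an
   element-level model of the unpack above for the V8HI -> V4SI case.
   Interleaving each narrow element with zero (unsigned) or with its
   sign-fill (signed) is exactly a widening conversion of one half.  */
#if 0
static void
model_sse_unpack_v8hi (const short *src, int unsigned_p, int high_p,
                       int *dest)
{
  int i, base = high_p ? 4 : 0;

  for (i = 0; i < 4; i++)
    dest[i] = unsigned_p ? (int) (unsigned short) src[base + i]
                         : (int) src[base + i];
}
#endif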
20038 /* Expand conditional increment or decrement using adc/sbb instructions.
20039 The default case using setcc followed by the conditional move can be
20040 done by generic code. */
20042 ix86_expand_int_addcc (rtx operands[])
20044 enum rtx_code code = GET_CODE (operands[1]);
20046 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
20048 rtx val = const0_rtx;
20049 bool fpcmp = false;
20050 enum machine_mode mode;
20051 rtx op0 = XEXP (operands[1], 0);
20052 rtx op1 = XEXP (operands[1], 1);
20054 if (operands[3] != const1_rtx
20055 && operands[3] != constm1_rtx)
20057 if (!ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
20059 code = GET_CODE (compare_op);
20061 flags = XEXP (compare_op, 0);
20063 if (GET_MODE (flags) == CCFPmode
20064 || GET_MODE (flags) == CCFPUmode)
20067 code = ix86_fp_compare_code_to_integer (code);
20074 PUT_CODE (compare_op,
20075 reverse_condition_maybe_unordered
20076 (GET_CODE (compare_op)));
20078 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
20081 mode = GET_MODE (operands[0]);
20083 /* Construct either adc or sbb insn. */
20084 if ((code == LTU) == (operands[3] == constm1_rtx))
20089 insn = gen_subqi3_carry;
20092 insn = gen_subhi3_carry;
20095 insn = gen_subsi3_carry;
20098 insn = gen_subdi3_carry;
20101 gcc_unreachable ();
20109 insn = gen_addqi3_carry;
20112 insn = gen_addhi3_carry;
20115 insn = gen_addsi3_carry;
20118 insn = gen_adddi3_carry;
20121 gcc_unreachable ();
20124 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
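/* For illustration (a hypothetical helper, not part of this file):
   what the expansion above buys us.  A conditional increment such as
   "x += (a < b)" becomes "cmp; adc x, 0"; the unsigned comparison
   leaves its result in the carry flag and adc folds it into the add,
   with no branch and no cmove.  */
#if 0
static unsigned int
model_int_addcc (unsigned int x, unsigned int a, unsigned int b)
{
  unsigned int carry = a < b;   /* models CF after "cmp a, b" */

  return x + carry;             /* models "adc x, 0" */
}
#endif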
20130 /* Split operands 0 and 1 into half-mode parts. Similar to split_double_mode,
20131 but works for floating point parameters and non-offsettable memories.
20132 For pushes, it returns just stack offsets; the values will be saved
20133 in the right order. At most four parts are generated. */
20136 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
20141 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
20143 size = (GET_MODE_SIZE (mode) + 4) / 8;
20145 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
20146 gcc_assert (size >= 2 && size <= 4);
20148 /* Optimize constant pool reference to immediates. This is used by fp
20149 moves, which force all constants to memory to allow combining. */
20150 if (MEM_P (operand) && MEM_READONLY_P (operand))
20152 rtx tmp = maybe_get_pool_constant (operand);
20157 if (MEM_P (operand) && !offsettable_memref_p (operand))
20159 /* The only non-offsettable memories we handle are pushes. */
20160 int ok = push_operand (operand, VOIDmode);
20164 operand = copy_rtx (operand);
20165 PUT_MODE (operand, Pmode);
20166 parts[0] = parts[1] = parts[2] = parts[3] = operand;
20170 if (GET_CODE (operand) == CONST_VECTOR)
20172 enum machine_mode imode = int_mode_for_mode (mode);
20173 /* Caution: if we looked through a constant pool memory above,
20174 the operand may actually have a different mode now. That's
20175 ok, since we want to pun this all the way back to an integer. */
20176 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
20177 gcc_assert (operand != NULL);
20183 if (mode == DImode)
20184 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
20189 if (REG_P (operand))
20191 gcc_assert (reload_completed);
20192 for (i = 0; i < size; i++)
20193 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
20195 else if (offsettable_memref_p (operand))
20197 operand = adjust_address (operand, SImode, 0);
20198 parts[0] = operand;
20199 for (i = 1; i < size; i++)
20200 parts[i] = adjust_address (operand, SImode, 4 * i);
20202 else if (GET_CODE (operand) == CONST_DOUBLE)
20207 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
20211 real_to_target (l, &r, mode);
20212 parts[3] = gen_int_mode (l[3], SImode);
20213 parts[2] = gen_int_mode (l[2], SImode);
20216 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
20217 parts[2] = gen_int_mode (l[2], SImode);
20220 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
20223 gcc_unreachable ();
20225 parts[1] = gen_int_mode (l[1], SImode);
20226 parts[0] = gen_int_mode (l[0], SImode);
20229 gcc_unreachable ();
20234 if (mode == TImode)
20235 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
20236 if (mode == XFmode || mode == TFmode)
20238 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
20239 if (REG_P (operand))
20241 gcc_assert (reload_completed);
20242 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
20243 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
20245 else if (offsettable_memref_p (operand))
20247 operand = adjust_address (operand, DImode, 0);
20248 parts[0] = operand;
20249 parts[1] = adjust_address (operand, upper_mode, 8);
20251 else if (GET_CODE (operand) == CONST_DOUBLE)
20256 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
20257 real_to_target (l, &r, mode);
20259 /* Do not use a shift by 32, to avoid a warning on 32-bit systems. */
20260 if (HOST_BITS_PER_WIDE_INT >= 64)
20263 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
20264 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
20267 parts[0] = immed_double_const (l[0], l[1], DImode);
20269 if (upper_mode == SImode)
20270 parts[1] = gen_int_mode (l[2], SImode);
20271 else if (HOST_BITS_PER_WIDE_INT >= 64)
20274 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
20275 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
20278 parts[1] = immed_double_const (l[2], l[3], DImode);
20281 gcc_unreachable ();
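/* For illustration (a hypothetical helper, not used in this file): a
   model of the common 32-bit DFmode case handled above.  The 8-byte
   value is punned to an integer and handed out as two SImode words,
   least significant part first.  */
#if 0
static void
model_split_di (unsigned long long v, unsigned int parts[2])
{
  parts[0] = (unsigned int) (v & 0xffffffffu);  /* low SImode part */
  parts[1] = (unsigned int) (v >> 32);          /* high SImode part */
}
#endif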
20288 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
20289 Return false when normal moves are needed; true when all required
20290 insns have been emitted. Operands 2-5 contain the output parts
20291 in the correct order; operands 6-9 contain the input parts. */
20294 ix86_split_long_move (rtx operands[])
20299 int collisions = 0;
20300 enum machine_mode mode = GET_MODE (operands[0]);
20301 bool collisionparts[4];
20303 /* The DFmode expanders may ask us to move a double.
20304 For a 64-bit target this is a single move. By hiding that fact
20305 here we simplify the i386.md splitters. */
20306 if (TARGET_64BIT && GET_MODE_SIZE (GET_MODE (operands[0])) == 8)
20308 /* Optimize constant pool reference to immediates. This is used by
20309 fp moves, which force all constants to memory to allow combining. */
20311 if (MEM_P (operands[1])
20312 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
20313 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
20314 operands[1] = get_pool_constant (XEXP (operands[1], 0));
20315 if (push_operand (operands[0], VOIDmode))
20317 operands[0] = copy_rtx (operands[0]);
20318 PUT_MODE (operands[0], Pmode);
20321 operands[0] = gen_lowpart (DImode, operands[0]);
20322 operands[1] = gen_lowpart (DImode, operands[1]);
20323 emit_move_insn (operands[0], operands[1]);
20327 /* The only non-offsettable memory we handle is a push. */
20328 if (push_operand (operands[0], VOIDmode))
20331 gcc_assert (!MEM_P (operands[0])
20332 || offsettable_memref_p (operands[0]));
20334 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
20335 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
20337 /* When emitting a push, take care of source operands living on the stack. */
20338 if (push && MEM_P (operands[1])
20339 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
20341 rtx src_base = XEXP (part[1][nparts - 1], 0);
20343 /* Compensate for the stack decrement by 4. */
20344 if (!TARGET_64BIT && nparts == 3
20345 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
20346 src_base = plus_constant (src_base, 4);
20348 /* src_base refers to the stack pointer and is
20349 automatically decreased by each emitted push. */
20350 for (i = 0; i < nparts; i++)
20351 part[1][i] = change_address (part[1][i],
20352 GET_MODE (part[1][i]), src_base);
20355 /* We need to do the copy in the right order in case an address register
20356 of the source overlaps the destination. */
20357 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
20361 for (i = 0; i < nparts; i++)
20364 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
20365 if (collisionparts[i])
20369 /* Collision in the middle part can be handled by reordering. */
20370 if (collisions == 1 && nparts == 3 && collisionparts [1])
20372 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
20373 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
20375 else if (collisions == 1
20377 && (collisionparts [1] || collisionparts [2]))
20379 if (collisionparts [1])
20381 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
20382 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
20386 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
20387 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
20391 /* If there are more collisions, we can't handle them by reordering.
20392 Do an lea to the last part and use only one colliding move. */
20393 else if (collisions > 1)
20399 base = part[0][nparts - 1];
20401 /* Handle the case when the last part isn't valid for lea.
20402 Happens in 64-bit mode storing the 12-byte XFmode. */
20403 if (GET_MODE (base) != Pmode)
20404 base = gen_rtx_REG (Pmode, REGNO (base));
20406 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
20407 part[1][0] = replace_equiv_address (part[1][0], base);
20408 for (i = 1; i < nparts; i++)
20410 tmp = plus_constant (base, UNITS_PER_WORD * i);
20411 part[1][i] = replace_equiv_address (part[1][i], tmp);
20422 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
20423 emit_insn (gen_addsi3 (stack_pointer_rtx,
20424 stack_pointer_rtx, GEN_INT (-4)));
20425 emit_move_insn (part[0][2], part[1][2]);
20427 else if (nparts == 4)
20429 emit_move_insn (part[0][3], part[1][3]);
20430 emit_move_insn (part[0][2], part[1][2]);
20435 /* In 64-bit mode we don't have a 32-bit push available. If the operand
20436 is a register, that is OK - we will just use the larger counterpart. We
20437 also retype the memory - this comes from an attempt to avoid a REX prefix
20438 when moving the second half of a TFmode value. */
20439 if (GET_MODE (part[1][1]) == SImode)
20441 switch (GET_CODE (part[1][1]))
20444 part[1][1] = adjust_address (part[1][1], DImode, 0);
20448 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
20452 gcc_unreachable ();
20455 if (GET_MODE (part[1][0]) == SImode)
20456 part[1][0] = part[1][1];
20459 emit_move_insn (part[0][1], part[1][1]);
20460 emit_move_insn (part[0][0], part[1][0]);
20464 /* Choose the correct order so we do not overwrite the source before it is copied. */
20465 if ((REG_P (part[0][0])
20466 && REG_P (part[1][1])
20467 && (REGNO (part[0][0]) == REGNO (part[1][1])
20469 && REGNO (part[0][0]) == REGNO (part[1][2]))
20471 && REGNO (part[0][0]) == REGNO (part[1][3]))))
20473 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
20475 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
20477 operands[2 + i] = part[0][j];
20478 operands[6 + i] = part[1][j];
20483 for (i = 0; i < nparts; i++)
20485 operands[2 + i] = part[0][i];
20486 operands[6 + i] = part[1][i];
20490 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
20491 if (optimize_insn_for_size_p ())
20493 for (j = 0; j < nparts - 1; j++)
20494 if (CONST_INT_P (operands[6 + j])
20495 && operands[6 + j] != const0_rtx
20496 && REG_P (operands[2 + j]))
20497 for (i = j; i < nparts - 1; i++)
20498 if (CONST_INT_P (operands[7 + i])
20499 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
20500 operands[7 + i] = operands[2 + j];
20503 for (i = 0; i < nparts; i++)
20504 emit_move_insn (operands[2 + i], operands[6 + i]);
20509 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
20510 left shift by a constant, either using a single shift or
20511 a sequence of add instructions. */
20514 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
20516 rtx (*insn)(rtx, rtx, rtx);
20519 || (count * ix86_cost->add <= ix86_cost->shift_const
20520 && !optimize_insn_for_size_p ()))
20522 insn = mode == DImode ? gen_addsi3 : gen_adddi3;
20523 while (count-- > 0)
20524 emit_insn (insn (operand, operand, operand));
20528 insn = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
20529 emit_insn (insn (operand, operand, GEN_INT (count)));
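/* For illustration (a hypothetical helper, not part of this file):
   the add sequence above relies on x + x == x << 1, so a shift by
   COUNT becomes COUNT self-additions when that is cheaper than a
   single shift by a constant.  */
#if 0
static unsigned int
model_ashl_via_add (unsigned int x, int count)
{
  while (count-- > 0)
    x += x;                     /* each add shifts left by one bit */
  return x;
}
#endif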
20534 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
20536 rtx (*gen_ashl3)(rtx, rtx, rtx);
20537 rtx (*gen_shld)(rtx, rtx, rtx);
20538 int half_width = GET_MODE_BITSIZE (mode) >> 1;
20540 rtx low[2], high[2];
20543 if (CONST_INT_P (operands[2]))
20545 split_double_mode (mode, operands, 2, low, high);
20546 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
20548 if (count >= half_width)
20550 emit_move_insn (high[0], low[1]);
20551 emit_move_insn (low[0], const0_rtx);
20553 if (count > half_width)
20554 ix86_expand_ashl_const (high[0], count - half_width, mode);
20558 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
20560 if (!rtx_equal_p (operands[0], operands[1]))
20561 emit_move_insn (operands[0], operands[1]);
20563 emit_insn (gen_shld (high[0], low[0], GEN_INT (count)));
20564 ix86_expand_ashl_const (low[0], count, mode);
20569 split_double_mode (mode, operands, 1, low, high);
20571 gen_ashl3 = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
20573 if (operands[1] == const1_rtx)
20575 /* Assuming we've chosen QImode-capable registers, 1 << N
20576 can be done with two 32/64-bit shifts, no branches, no cmoves. */
20577 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
20579 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
20581 ix86_expand_clear (low[0]);
20582 ix86_expand_clear (high[0]);
20583 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (half_width)));
20585 d = gen_lowpart (QImode, low[0]);
20586 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
20587 s = gen_rtx_EQ (QImode, flags, const0_rtx);
20588 emit_insn (gen_rtx_SET (VOIDmode, d, s));
20590 d = gen_lowpart (QImode, high[0]);
20591 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
20592 s = gen_rtx_NE (QImode, flags, const0_rtx);
20593 emit_insn (gen_rtx_SET (VOIDmode, d, s));
20596 /* Otherwise, we can get the same results by manually performing
20597 a bit extract operation on bit 5/6, and then performing the two
20598 shifts. The two methods of getting 0/1 into low/high are exactly
20599 the same size. Avoiding the shift in the bit extract case helps
20600 pentium4 a bit; no one else seems to care much either way. */
20603 enum machine_mode half_mode;
20604 rtx (*gen_lshr3)(rtx, rtx, rtx);
20605 rtx (*gen_and3)(rtx, rtx, rtx);
20606 rtx (*gen_xor3)(rtx, rtx, rtx);
20607 HOST_WIDE_INT bits;
20610 if (mode == DImode)
20612 half_mode = SImode;
20613 gen_lshr3 = gen_lshrsi3;
20614 gen_and3 = gen_andsi3;
20615 gen_xor3 = gen_xorsi3;
20620 half_mode = DImode;
20621 gen_lshr3 = gen_lshrdi3;
20622 gen_and3 = gen_anddi3;
20623 gen_xor3 = gen_xordi3;
20627 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
20628 x = gen_rtx_ZERO_EXTEND (half_mode, operands[2]);
20630 x = gen_lowpart (half_mode, operands[2]);
20631 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
20633 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (bits)));
20634 emit_insn (gen_and3 (high[0], high[0], const1_rtx));
20635 emit_move_insn (low[0], high[0]);
20636 emit_insn (gen_xor3 (low[0], low[0], const1_rtx));
20639 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
20640 emit_insn (gen_ashl3 (high[0], high[0], operands[2]));
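/* For illustration (a hypothetical helper, not used in this file): a
   scalar model of the branchless double-word "1 << n" built above,
   assuming 32-bit halves.  Bit 5 of N picks which half receives the
   set bit; the final shifts rely on the hardware truncating the
   shift count modulo the half width.  */
#if 0
static void
model_one_shl_n (unsigned int n, unsigned int *low, unsigned int *high)
{
  *low = (n & 32) == 0;         /* models the setcc from the bit test */
  *high = (n & 32) != 0;
  *low <<= (n & 31);            /* hardware masks the shift count */
  *high <<= (n & 31);
}
#endif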
20644 if (operands[1] == constm1_rtx)
20646 /* For -1 << N, we can avoid the shld instruction, because we
20647 know that we're shifting 0...31/63 ones into a -1. */
20648 emit_move_insn (low[0], constm1_rtx);
20649 if (optimize_insn_for_size_p ())
20650 emit_move_insn (high[0], low[0]);
20652 emit_move_insn (high[0], constm1_rtx);
20656 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
20658 if (!rtx_equal_p (operands[0], operands[1]))
20659 emit_move_insn (operands[0], operands[1]);
20661 split_double_mode (mode, operands, 1, low, high);
20662 emit_insn (gen_shld (high[0], low[0], operands[2]));
20665 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
20667 if (TARGET_CMOVE && scratch)
20669 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
20670 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
20672 ix86_expand_clear (scratch);
20673 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
20677 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
20678 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
20680 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
20685 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
20687 rtx (*gen_ashr3)(rtx, rtx, rtx)
20688 = mode == DImode ? gen_ashrsi3 : gen_ashrdi3;
20689 rtx (*gen_shrd)(rtx, rtx, rtx);
20690 int half_width = GET_MODE_BITSIZE (mode) >> 1;
20692 rtx low[2], high[2];
20695 if (CONST_INT_P (operands[2]))
20697 split_double_mode (mode, operands, 2, low, high);
20698 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
20700 if (count == GET_MODE_BITSIZE (mode) - 1)
20702 emit_move_insn (high[0], high[1]);
20703 emit_insn (gen_ashr3 (high[0], high[0],
20704 GEN_INT (half_width - 1)));
20705 emit_move_insn (low[0], high[0]);
20708 else if (count >= half_width)
20710 emit_move_insn (low[0], high[1]);
20711 emit_move_insn (high[0], low[0]);
20712 emit_insn (gen_ashr3 (high[0], high[0],
20713 GEN_INT (half_width - 1)));
20715 if (count > half_width)
20716 emit_insn (gen_ashr3 (low[0], low[0],
20717 GEN_INT (count - half_width)));
20721 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
20723 if (!rtx_equal_p (operands[0], operands[1]))
20724 emit_move_insn (operands[0], operands[1]);
20726 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
20727 emit_insn (gen_ashr3 (high[0], high[0], GEN_INT (count)));
20732 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
20734 if (!rtx_equal_p (operands[0], operands[1]))
20735 emit_move_insn (operands[0], operands[1]);
20737 split_double_mode (mode, operands, 1, low, high);
20739 emit_insn (gen_shrd (low[0], high[0], operands[2]));
20740 emit_insn (gen_ashr3 (high[0], high[0], operands[2]));
20742 if (TARGET_CMOVE && scratch)
20744 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
20745 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
20747 emit_move_insn (scratch, high[0]);
20748 emit_insn (gen_ashr3 (scratch, scratch,
20749 GEN_INT (half_width - 1)));
20750 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
20755 rtx (*gen_x86_shift_adj_3)(rtx, rtx, rtx)
20756 = mode == DImode ? gen_x86_shiftsi_adj_3 : gen_x86_shiftdi_adj_3;
20758 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
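/* For illustration (a hypothetical helper, not part of this file): a
   scalar model of the constant-count split above, assuming 32-bit
   halves, arithmetic >> on signed int, and 0 < COUNT < 64.  */
#if 0
static void
model_ashr_const (int *low, int *high, int count)
{
  if (count >= 32)
    {
      *low = *high >> (count - 32);     /* low gets the shifted high part */
      *high >>= 31;                     /* high becomes pure sign bits */
    }
  else
    {
      /* shrd: bits shifted out of the high part enter the low part.  */
      *low = (int) (((unsigned int) *low >> count)
                    | ((unsigned int) *high << (32 - count)));
      *high >>= count;
    }
}
#endif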
20764 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
20766 rtx (*gen_lshr3)(rtx, rtx, rtx)
20767 = mode == DImode ? gen_lshrsi3 : gen_lshrdi3;
20768 rtx (*gen_shrd)(rtx, rtx, rtx);
20769 int half_width = GET_MODE_BITSIZE (mode) >> 1;
20771 rtx low[2], high[2];
20774 if (CONST_INT_P (operands[2]))
20776 split_double_mode (mode, operands, 2, low, high);
20777 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
20779 if (count >= half_width)
20781 emit_move_insn (low[0], high[1]);
20782 ix86_expand_clear (high[0]);
20784 if (count > half_width)
20785 emit_insn (gen_lshr3 (low[0], low[0],
20786 GEN_INT (count - half_width)));
20790 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
20792 if (!rtx_equal_p (operands[0], operands[1]))
20793 emit_move_insn (operands[0], operands[1]);
20795 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
20796 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (count)));
20801 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
20803 if (!rtx_equal_p (operands[0], operands[1]))
20804 emit_move_insn (operands[0], operands[1]);
20806 split_double_mode (mode, operands, 1, low, high);
20808 emit_insn (gen_shrd (low[0], high[0], operands[2]));
20809 emit_insn (gen_lshr3 (high[0], high[0], operands[2]));
20811 if (TARGET_CMOVE && scratch)
20813 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
20814 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
20816 ix86_expand_clear (scratch);
20817 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
20822 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
20823 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
20825 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
20830 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
20832 predict_jump (int prob)
20834 rtx insn = get_last_insn ();
20835 gcc_assert (JUMP_P (insn));
20836 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
20839 /* Helper function for the string operations below. Test VARIABLE whether
20840 it is aligned to VALUE bytes. If true, jump to the label. */
20842 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
20844 rtx label = gen_label_rtx ();
20845 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
20846 if (GET_MODE (variable) == DImode)
20847 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
20849 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
20850 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
20853 predict_jump (REG_BR_PROB_BASE * 50 / 100);
20855 predict_jump (REG_BR_PROB_BASE * 90 / 100);
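/* For illustration (a hypothetical helper, not used in this file):
   the test emitted above reduces to "(VARIABLE & VALUE) == 0 ->
   branch"; a nonzero result below means the jump is taken because
   the tested alignment bits of VARIABLE are already clear.  */
#if 0
static int
model_aligntest (unsigned long variable, unsigned long value)
{
  return (variable & value) == 0;
}
#endif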
20859 /* Adjust COUNTREG by VALUE. */
20861 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
20863 rtx (*gen_add)(rtx, rtx, rtx)
20864 = GET_MODE (countreg) == DImode ? gen_adddi3 : gen_addsi3;
20866 emit_insn (gen_add (countreg, countreg, GEN_INT (-value)));
20869 /* Zero-extend the possibly SImode EXP to a Pmode register. */
20871 ix86_zero_extend_to_Pmode (rtx exp)
20874 if (GET_MODE (exp) == VOIDmode)
20875 return force_reg (Pmode, exp);
20876 if (GET_MODE (exp) == Pmode)
20877 return copy_to_mode_reg (Pmode, exp);
20878 r = gen_reg_rtx (Pmode);
20879 emit_insn (gen_zero_extendsidi2 (r, exp));
20883 /* Divide COUNTREG by SCALE. */
20885 scale_counter (rtx countreg, int scale)
20891 if (CONST_INT_P (countreg))
20892 return GEN_INT (INTVAL (countreg) / scale);
20893 gcc_assert (REG_P (countreg));
20895 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
20896 GEN_INT (exact_log2 (scale)),
20897 NULL, 1, OPTAB_DIRECT);
20901 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
20902 DImode for constant loop counts. */
20904 static enum machine_mode
20905 counter_mode (rtx count_exp)
20907 if (GET_MODE (count_exp) != VOIDmode)
20908 return GET_MODE (count_exp);
20909 if (!CONST_INT_P (count_exp))
20911 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
20916 /* When SRCPTR is non-NULL, output a simple loop to copy the memory
20917 pointed to by SRCPTR to DESTPTR via chunks of MODE unrolled UNROLL times;
20918 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
20919 the equivalent loop to set memory by VALUE (supposed to be in MODE).
20921 The size is rounded down to a whole number of chunks moved at once.
20922 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
20926 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
20927 rtx destptr, rtx srcptr, rtx value,
20928 rtx count, enum machine_mode mode, int unroll,
20931 rtx out_label, top_label, iter, tmp;
20932 enum machine_mode iter_mode = counter_mode (count);
20933 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
20934 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
20940 top_label = gen_label_rtx ();
20941 out_label = gen_label_rtx ();
20942 iter = gen_reg_rtx (iter_mode);
20944 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
20945 NULL, 1, OPTAB_DIRECT);
20946 /* Those two should combine. */
20947 if (piece_size == const1_rtx)
20949 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
20951 predict_jump (REG_BR_PROB_BASE * 10 / 100);
20953 emit_move_insn (iter, const0_rtx);
20955 emit_label (top_label);
20957 tmp = convert_modes (Pmode, iter_mode, iter, true);
20958 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
20959 destmem = change_address (destmem, mode, x_addr);
20963 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
20964 srcmem = change_address (srcmem, mode, y_addr);
20966 /* When unrolling for chips that reorder memory reads and writes,
20967 we can save registers by using a single temporary.
20968 Using 4 temporaries is also overkill in 32-bit mode. */
20969 if (!TARGET_64BIT && 0)
20971 for (i = 0; i < unroll; i++)
20976 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
20978 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
20980 emit_move_insn (destmem, srcmem);
20986 gcc_assert (unroll <= 4);
20987 for (i = 0; i < unroll; i++)
20989 tmpreg[i] = gen_reg_rtx (mode);
20993 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
20995 emit_move_insn (tmpreg[i], srcmem);
20997 for (i = 0; i < unroll; i++)
21002 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
21004 emit_move_insn (destmem, tmpreg[i]);
21009 for (i = 0; i < unroll; i++)
21013 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
21014 emit_move_insn (destmem, value);
21017 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
21018 true, OPTAB_LIB_WIDEN);
21020 emit_move_insn (iter, tmp);
21022 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
21024 if (expected_size != -1)
21026 expected_size /= GET_MODE_SIZE (mode) * unroll;
21027 if (expected_size == 0)
21029 else if (expected_size > REG_BR_PROB_BASE)
21030 predict_jump (REG_BR_PROB_BASE - 1);
21032 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
21035 predict_jump (REG_BR_PROB_BASE * 80 / 100);
21036 iter = ix86_zero_extend_to_Pmode (iter);
21037 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
21038 true, OPTAB_LIB_WIDEN);
21039 if (tmp != destptr)
21040 emit_move_insn (destptr, tmp);
21043 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
21044 true, OPTAB_LIB_WIDEN);
21046 emit_move_insn (srcptr, tmp);
21048 emit_label (out_label);
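/* For illustration (a hypothetical helper, not part of this file): a
   C rendering of the loop emitted above.  CHUNK stands for
   GET_MODE_SIZE (MODE) * UNROLL, the inner loop stands for the
   unrolled MODE-sized moves, and the zero-size guard is shown
   unconditionally for simplicity.  */
#if 0
static void
model_copy_loop (char *dest, const char *src, unsigned long count,
                 unsigned long chunk)
{
  unsigned long iter = 0;
  unsigned long size = count & ~(chunk - 1);    /* round count down */

  if (size == 0)
    return;                                     /* jump to out_label */
  do
    {
      unsigned long i;

      for (i = 0; i < chunk; i++)               /* the unrolled moves */
        dest[iter + i] = src[iter + i];
      iter += chunk;
    }
  while (iter < size);
}
#endif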
21051 /* Output "rep; mov" instruction.
21052 Arguments have the same meaning as for the previous function. */
21054 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
21055 rtx destptr, rtx srcptr,
21057 enum machine_mode mode)
21062 HOST_WIDE_INT rounded_count;
21064 /* If the size is known, it is shorter to use rep movs. */
21065 if (mode == QImode && CONST_INT_P (count)
21066 && !(INTVAL (count) & 3))
21069 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
21070 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
21071 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
21072 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
21073 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
21074 if (mode != QImode)
21076 destexp = gen_rtx_ASHIFT (Pmode, countreg,
21077 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
21078 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
21079 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
21080 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
21081 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
21085 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
21086 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
21088 if (CONST_INT_P (count))
21090 rounded_count = (INTVAL (count)
21091 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
21092 destmem = shallow_copy_rtx (destmem);
21093 srcmem = shallow_copy_rtx (srcmem);
21094 set_mem_size (destmem, rounded_count);
21095 set_mem_size (srcmem, rounded_count);
21099 if (MEM_SIZE_KNOWN_P (destmem))
21100 clear_mem_size (destmem);
21101 if (MEM_SIZE_KNOWN_P (srcmem))
21102 clear_mem_size (srcmem);
21104 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
21108 /* Output "rep; stos" instruction.
21109 Arguments have the same meaning as for the previous function. */
21111 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
21112 rtx count, enum machine_mode mode,
21117 HOST_WIDE_INT rounded_count;
21119 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
21120 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
21121 value = force_reg (mode, gen_lowpart (mode, value));
21122 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
21123 if (mode != QImode)
21125 destexp = gen_rtx_ASHIFT (Pmode, countreg,
21126 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
21127 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
21130 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
21131 if (orig_value == const0_rtx && CONST_INT_P (count))
21133 rounded_count = (INTVAL (count)
21134 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
21135 destmem = shallow_copy_rtx (destmem);
21136 set_mem_size (destmem, rounded_count);
21138 else if (MEM_SIZE_KNOWN_P (destmem))
21139 clear_mem_size (destmem);
21140 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
21144 emit_strmov (rtx destmem, rtx srcmem,
21145 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
21147 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
21148 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
21149 emit_insn (gen_strmov (destptr, dest, srcptr, src));
21152 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
21154 expand_movmem_epilogue (rtx destmem, rtx srcmem,
21155 rtx destptr, rtx srcptr, rtx count, int max_size)
21158 if (CONST_INT_P (count))
21160 HOST_WIDE_INT countval = INTVAL (count);
21163 if ((countval & 0x10) && max_size > 16)
21167 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
21168 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
21171 gcc_unreachable ();
21174 if ((countval & 0x08) && max_size > 8)
21177 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
21180 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
21181 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
21185 if ((countval & 0x04) && max_size > 4)
21187 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
21190 if ((countval & 0x02) && max_size > 2)
21192 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
21195 if ((countval & 0x01) && max_size > 1)
21197 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
21204 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
21205 count, 1, OPTAB_DIRECT);
21206 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
21207 count, QImode, 1, 4);
21211 /* When there are stringops, we can cheaply advance the dest and src pointers.
21212 Otherwise we save code size by maintaining an offset (zero is readily
21213 available from the preceding rep operation) and using x86 addressing modes.
21215 if (TARGET_SINGLE_STRINGOP)
21219 rtx label = ix86_expand_aligntest (count, 4, true);
21220 src = change_address (srcmem, SImode, srcptr);
21221 dest = change_address (destmem, SImode, destptr);
21222 emit_insn (gen_strmov (destptr, dest, srcptr, src));
21223 emit_label (label);
21224 LABEL_NUSES (label) = 1;
21228 rtx label = ix86_expand_aligntest (count, 2, true);
21229 src = change_address (srcmem, HImode, srcptr);
21230 dest = change_address (destmem, HImode, destptr);
21231 emit_insn (gen_strmov (destptr, dest, srcptr, src));
21232 emit_label (label);
21233 LABEL_NUSES (label) = 1;
21237 rtx label = ix86_expand_aligntest (count, 1, true);
21238 src = change_address (srcmem, QImode, srcptr);
21239 dest = change_address (destmem, QImode, destptr);
21240 emit_insn (gen_strmov (destptr, dest, srcptr, src));
21241 emit_label (label);
21242 LABEL_NUSES (label) = 1;
21247 rtx offset = force_reg (Pmode, const0_rtx);
21252 rtx label = ix86_expand_aligntest (count, 4, true);
21253 src = change_address (srcmem, SImode, srcptr);
21254 dest = change_address (destmem, SImode, destptr);
21255 emit_move_insn (dest, src);
21256 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
21257 true, OPTAB_LIB_WIDEN);
21259 emit_move_insn (offset, tmp);
21260 emit_label (label);
21261 LABEL_NUSES (label) = 1;
21265 rtx label = ix86_expand_aligntest (count, 2, true);
21266 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
21267 src = change_address (srcmem, HImode, tmp);
21268 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
21269 dest = change_address (destmem, HImode, tmp);
21270 emit_move_insn (dest, src);
21271 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
21272 true, OPTAB_LIB_WIDEN);
21274 emit_move_insn (offset, tmp);
21275 emit_label (label);
21276 LABEL_NUSES (label) = 1;
21280 rtx label = ix86_expand_aligntest (count, 1, true);
21281 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
21282 src = change_address (srcmem, QImode, tmp);
21283 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
21284 dest = change_address (destmem, QImode, tmp);
21285 emit_move_insn (dest, src);
21286 emit_label (label);
21287 LABEL_NUSES (label) = 1;
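/* For illustration (a hypothetical helper, not used in this file):
   the constant-count epilogue above is a binary decomposition of the
   remainder; each set bit of COUNT below MAX_SIZE triggers exactly
   one move of that power-of-two width.  Model for widths up to 8:  */
#if 0
static void
model_movmem_epilogue (char *dest, const char *src, unsigned long count)
{
  unsigned long offset = 0, width;

  for (width = 8; width != 0; width >>= 1)      /* DI, SI, HI, QI moves */
    if (count & width)
      {
        unsigned long i;

        for (i = 0; i < width; i++)             /* one move of WIDTH bytes */
          dest[offset + i] = src[offset + i];
        offset += width;
      }
}
#endif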
21292 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
21294 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
21295 rtx count, int max_size)
21298 expand_simple_binop (counter_mode (count), AND, count,
21299 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
21300 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
21301 gen_lowpart (QImode, value), count, QImode,
21305 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
21307 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
21311 if (CONST_INT_P (count))
21313 HOST_WIDE_INT countval = INTVAL (count);
21316 if ((countval & 0x10) && max_size > 16)
21320 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
21321 emit_insn (gen_strset (destptr, dest, value));
21322 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
21323 emit_insn (gen_strset (destptr, dest, value));
21326 gcc_unreachable ();
21329 if ((countval & 0x08) && max_size > 8)
21333 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
21334 emit_insn (gen_strset (destptr, dest, value));
21338 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
21339 emit_insn (gen_strset (destptr, dest, value));
21340 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
21341 emit_insn (gen_strset (destptr, dest, value));
21345 if ((countval & 0x04) && max_size > 4)
21347 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
21348 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
21351 if ((countval & 0x02) && max_size > 2)
21353 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
21354 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
21357 if ((countval & 0x01) && max_size > 1)
21359 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
21360 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
21367 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
21372 rtx label = ix86_expand_aligntest (count, 16, true);
21375 dest = change_address (destmem, DImode, destptr);
21376 emit_insn (gen_strset (destptr, dest, value));
21377 emit_insn (gen_strset (destptr, dest, value));
21381 dest = change_address (destmem, SImode, destptr);
21382 emit_insn (gen_strset (destptr, dest, value));
21383 emit_insn (gen_strset (destptr, dest, value));
21384 emit_insn (gen_strset (destptr, dest, value));
21385 emit_insn (gen_strset (destptr, dest, value));
21387 emit_label (label);
21388 LABEL_NUSES (label) = 1;
21392 rtx label = ix86_expand_aligntest (count, 8, true);
21395 dest = change_address (destmem, DImode, destptr);
21396 emit_insn (gen_strset (destptr, dest, value));
21400 dest = change_address (destmem, SImode, destptr);
21401 emit_insn (gen_strset (destptr, dest, value));
21402 emit_insn (gen_strset (destptr, dest, value));
21404 emit_label (label);
21405 LABEL_NUSES (label) = 1;
21409 rtx label = ix86_expand_aligntest (count, 4, true);
21410 dest = change_address (destmem, SImode, destptr);
21411 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
21412 emit_label (label);
21413 LABEL_NUSES (label) = 1;
21417 rtx label = ix86_expand_aligntest (count, 2, true);
21418 dest = change_address (destmem, HImode, destptr);
21419 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
21420 emit_label (label);
21421 LABEL_NUSES (label) = 1;
21425 rtx label = ix86_expand_aligntest (count, 1, true);
21426 dest = change_address (destmem, QImode, destptr);
21427 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
21428 emit_label (label);
21429 LABEL_NUSES (label) = 1;
21433 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
21434 to DESIRED_ALIGNMENT. */
21436 expand_movmem_prologue (rtx destmem, rtx srcmem,
21437 rtx destptr, rtx srcptr, rtx count,
21438 int align, int desired_alignment)
21440 if (align <= 1 && desired_alignment > 1)
21442 rtx label = ix86_expand_aligntest (destptr, 1, false);
21443 srcmem = change_address (srcmem, QImode, srcptr);
21444 destmem = change_address (destmem, QImode, destptr);
21445 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
21446 ix86_adjust_counter (count, 1);
21447 emit_label (label);
21448 LABEL_NUSES (label) = 1;
21450 if (align <= 2 && desired_alignment > 2)
21452 rtx label = ix86_expand_aligntest (destptr, 2, false);
21453 srcmem = change_address (srcmem, HImode, srcptr);
21454 destmem = change_address (destmem, HImode, destptr);
21455 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
21456 ix86_adjust_counter (count, 2);
21457 emit_label (label);
21458 LABEL_NUSES (label) = 1;
21460 if (align <= 4 && desired_alignment > 4)
21462 rtx label = ix86_expand_aligntest (destptr, 4, false);
21463 srcmem = change_address (srcmem, SImode, srcptr);
21464 destmem = change_address (destmem, SImode, destptr);
21465 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
21466 ix86_adjust_counter (count, 4);
21467 emit_label (label);
21468 LABEL_NUSES (label) = 1;
21470 gcc_assert (desired_alignment <= 8);
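/* For illustration (a hypothetical helper, not part of this file):
   each step of the prologue above peels one power-of-two sized copy,
   guarded by a test of the corresponding destination address bit,
   until DEST reaches the DESIRED alignment.  */
#if 0
static void
model_movmem_prologue (char **dest, const char **src,
                       unsigned long *count, int align, int desired)
{
  int size, i;

  for (size = 1; size < desired; size <<= 1)
    if (align <= size && ((unsigned long) *dest & size))
      {
        for (i = 0; i < size; i++)      /* one QI/HI/SI move */
          (*dest)[i] = (*src)[i];
        *dest += size;
        *src += size;
        *count -= size;
      }
}
#endif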
21473 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
21474 ALIGN_BYTES is how many bytes need to be copied. */
21476 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
21477 int desired_align, int align_bytes)
21480 rtx orig_dst = dst;
21481 rtx orig_src = src;
21483 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
21484 if (src_align_bytes >= 0)
21485 src_align_bytes = desired_align - src_align_bytes;
21486 if (align_bytes & 1)
21488 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
21489 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
21491 emit_insn (gen_strmov (destreg, dst, srcreg, src));
21493 if (align_bytes & 2)
21495 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
21496 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
21497 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
21498 set_mem_align (dst, 2 * BITS_PER_UNIT);
21499 if (src_align_bytes >= 0
21500 && (src_align_bytes & 1) == (align_bytes & 1)
21501 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
21502 set_mem_align (src, 2 * BITS_PER_UNIT);
21504 emit_insn (gen_strmov (destreg, dst, srcreg, src));
21506 if (align_bytes & 4)
21508 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
21509 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
21510 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
21511 set_mem_align (dst, 4 * BITS_PER_UNIT);
21512 if (src_align_bytes >= 0)
21514 unsigned int src_align = 0;
21515 if ((src_align_bytes & 3) == (align_bytes & 3))
21517 else if ((src_align_bytes & 1) == (align_bytes & 1))
21519 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
21520 set_mem_align (src, src_align * BITS_PER_UNIT);
21523 emit_insn (gen_strmov (destreg, dst, srcreg, src));
21525 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
21526 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
21527 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
21528 set_mem_align (dst, desired_align * BITS_PER_UNIT);
21529 if (src_align_bytes >= 0)
21531 unsigned int src_align = 0;
21532 if ((src_align_bytes & 7) == (align_bytes & 7))
21534 else if ((src_align_bytes & 3) == (align_bytes & 3))
21536 else if ((src_align_bytes & 1) == (align_bytes & 1))
21538 if (src_align > (unsigned int) desired_align)
21539 src_align = desired_align;
21540 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
21541 set_mem_align (src, src_align * BITS_PER_UNIT);
21543 if (MEM_SIZE_KNOWN_P (orig_dst))
21544 set_mem_size (dst, MEM_SIZE (orig_dst) - align_bytes);
21545 if (MEM_SIZE_KNOWN_P (orig_src))
21546 set_mem_size (src, MEM_SIZE (orig_src) - align_bytes);
21551 /* Store enough into DEST to align it, known to be aligned by ALIGN,
21552 to DESIRED_ALIGNMENT. */
21554 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
21555 int align, int desired_alignment)
21557 if (align <= 1 && desired_alignment > 1)
21559 rtx label = ix86_expand_aligntest (destptr, 1, false);
21560 destmem = change_address (destmem, QImode, destptr);
21561 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
21562 ix86_adjust_counter (count, 1);
21563 emit_label (label);
21564 LABEL_NUSES (label) = 1;
21566 if (align <= 2 && desired_alignment > 2)
21568 rtx label = ix86_expand_aligntest (destptr, 2, false);
21569 destmem = change_address (destmem, HImode, destptr);
21570 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
21571 ix86_adjust_counter (count, 2);
21572 emit_label (label);
21573 LABEL_NUSES (label) = 1;
21575 if (align <= 4 && desired_alignment > 4)
21577 rtx label = ix86_expand_aligntest (destptr, 4, false);
21578 destmem = change_address (destmem, SImode, destptr);
21579 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
21580 ix86_adjust_counter (count, 4);
21581 emit_label (label);
21582 LABEL_NUSES (label) = 1;
21584 gcc_assert (desired_alignment <= 8);
21587 /* Store enough into DST to align it, known to be aligned by ALIGN, to
21588 DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
21590 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
21591 int desired_align, int align_bytes)
21594 rtx orig_dst = dst;
21595 if (align_bytes & 1)
21597 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
21599 emit_insn (gen_strset (destreg, dst,
21600 gen_lowpart (QImode, value)));
21602 if (align_bytes & 2)
21604 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
21605 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
21606 set_mem_align (dst, 2 * BITS_PER_UNIT);
21608 emit_insn (gen_strset (destreg, dst,
21609 gen_lowpart (HImode, value)));
21611 if (align_bytes & 4)
21613 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
21614 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
21615 set_mem_align (dst, 4 * BITS_PER_UNIT);
21617 emit_insn (gen_strset (destreg, dst,
21618 gen_lowpart (SImode, value)));
21620 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
21621 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
21622 set_mem_align (dst, desired_align * BITS_PER_UNIT);
21623 if (MEM_SIZE_KNOWN_P (orig_dst))
21624 set_mem_size (dst, MEM_SIZE (orig_dst) - align_bytes);
21628 /* Given COUNT and EXPECTED_SIZE, decide on the codegen of the string operation. */
21629 static enum stringop_alg
21630 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
21631 int *dynamic_check)
21633 const struct stringop_algs * algs;
21634 bool optimize_for_speed;
21635 /* Algorithms using the rep prefix want at least edi and ecx;
21636 additionally, memset wants eax and memcpy wants esi. Don't
21637 consider such algorithms if the user has appropriated those
21638 registers for their own purposes. */
21639 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
21641 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
21643 #define ALG_USABLE_P(alg) (rep_prefix_usable \
21644 || (alg != rep_prefix_1_byte \
21645 && alg != rep_prefix_4_byte \
21646 && alg != rep_prefix_8_byte))
21647 const struct processor_costs *cost;
21649 /* Even if the string operation call is cold, we still might spend a lot
21650 of time processing large blocks. */
21651 if (optimize_function_for_size_p (cfun)
21652 || (optimize_insn_for_size_p ()
21653 && expected_size != -1 && expected_size < 256))
21654 optimize_for_speed = false;
21656 optimize_for_speed = true;
21658 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
21660 *dynamic_check = -1;
21662 algs = &cost->memset[TARGET_64BIT != 0];
21664 algs = &cost->memcpy[TARGET_64BIT != 0];
21665 if (ix86_stringop_alg != no_stringop && ALG_USABLE_P (ix86_stringop_alg))
21666 return ix86_stringop_alg;
21667 /* rep; movq or rep; movl is the smallest variant. */
21668 else if (!optimize_for_speed)
21670 if (!count || (count & 3))
21671 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
21673 return rep_prefix_usable ? rep_prefix_4_byte : loop;
21675 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.
21677 else if (expected_size != -1 && expected_size < 4)
21678 return loop_1_byte;
21679 else if (expected_size != -1)
21682 enum stringop_alg alg = libcall;
21683 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
21685 /* We get here if the algorithms that were not libcall-based
21686 were rep-prefix based and we are unable to use rep prefixes
21687 based on global register usage. Break out of the loop and
21688 use the heuristic below. */
21689 if (algs->size[i].max == 0)
21691 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
21693 enum stringop_alg candidate = algs->size[i].alg;
21695 if (candidate != libcall && ALG_USABLE_P (candidate))
21697 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
21698 last non-libcall inline algorithm. */
21699 if (TARGET_INLINE_ALL_STRINGOPS)
21701 /* When the current size is best copied by a libcall,
21702 but we are still forced to inline, run the heuristic below
21703 that picks code for medium-sized blocks. */
21704 if (alg != libcall)
21708 else if (ALG_USABLE_P (candidate))
21712 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
21714 /* When asked to inline the call anyway, try to pick a meaningful choice.
21715 We look for the maximal size of block that is faster to copy by hand
21716 and take blocks of at most that size, guessing that the average size
21717 will be roughly half of the maximum.
21719 If this turns out to be bad, we might simply specify the preferred
21720 choice in ix86_costs. */
21721 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
21722 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
21725 enum stringop_alg alg;
21727 bool any_alg_usable_p = true;
21729 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
21731 enum stringop_alg candidate = algs->size[i].alg;
21732 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
21734 if (candidate != libcall && candidate
21735 && ALG_USABLE_P (candidate))
21736 max = algs->size[i].max;
21738 /* If there aren't any usable algorithms, then recursing on
21739 smaller sizes isn't going to find anything. Just return the
21740 simple byte-at-a-time copy loop. */
21741 if (!any_alg_usable_p)
21743 /* Pick something reasonable. */
21744 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
21745 *dynamic_check = 128;
21746 return loop_1_byte;
21750 alg = decide_alg (count, max / 2, memset, dynamic_check);
21751 gcc_assert (*dynamic_check == -1);
21752 gcc_assert (alg != libcall);
21753 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
21754 *dynamic_check = max;
21757 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
21758 #undef ALG_USABLE_P
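/* For illustration (hypothetical types and helper, not the real
   stringop_algs layout): the table walk above treats the size table
   as an ordered list of { max, alg } pairs; the first entry whose
   MAX covers the expected size wins, with -1 meaning unbounded.  */
#if 0
struct model_stringop_entry { long max; int alg; };

static int
model_pick_alg (const struct model_stringop_entry *table, int n,
                long expected_size, int fallback_alg)
{
  int i;

  for (i = 0; i < n; i++)
    if (table[i].max == -1 || table[i].max >= expected_size)
      return table[i].alg;
  return fallback_alg;          /* typically a library call */
}
#endif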
21761 /* Decide on alignment. We know that the operand is already aligned to ALIGN
21762 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
21764 decide_alignment (int align,
21765 enum stringop_alg alg,
21768 int desired_align = 0;
21772 gcc_unreachable ();
21774 case unrolled_loop:
21775 desired_align = GET_MODE_SIZE (Pmode);
21777 case rep_prefix_8_byte:
21780 case rep_prefix_4_byte:
21781 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
21782 copying a whole cache line at once. */
21783 if (TARGET_PENTIUMPRO)
21788 case rep_prefix_1_byte:
21789 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
21790 copying a whole cache line at once. */
21791 if (TARGET_PENTIUMPRO)
21805 if (desired_align < align)
21806 desired_align = align;
21807 if (expected_size != -1 && expected_size < 4)
21808 desired_align = align;
21809 return desired_align;
21812 /* Return the smallest power of 2 greater than VAL. */
21814 smallest_pow2_greater_than (int val)
21822 /* Expand string move (memcpy) operation. Use i386 string operations
21823 when profitable. expand_setmem contains similar code. The code
21824 depends upon architecture, block size and alignment, but always has
21825 the same overall structure:
21827 1) Prologue guard: Conditional that jumps up to epilogues for small
21828 blocks that can be handled by the epilogue alone. This is faster
21829 but also needed for correctness, since the prologue assumes the
21830 block is larger than the desired alignment.
21832 An optional dynamic check for size and a libcall for large
21833 blocks are emitted here too, with -minline-stringops-dynamically.
21835 2) Prologue: copy the first few bytes in order to get the destination
21836 aligned to DESIRED_ALIGN. It is emitted only when ALIGN is less
21837 than DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be
21838 copied. We emit either a jump tree for power-of-two sized
21839 blocks, or a byte loop.
21841 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
21842 with the specified algorithm.
21844 4) Epilogue: code copying the tail of the block that is too small to be
21845 handled by the main body (or up to the size guarded by the prologue guard). */
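/* As a rough illustration, and under the simplifying assumptions of a
   byte-wise alignment prologue and a chunked main body, the emitted
   code has approximately this C shape:

       if (count < epilogue_size_needed)          -- 1) prologue guard
         goto epilogue;
       while (dest & (desired_align - 1))         -- 2) alignment prologue
         copy one byte, count--;
       copy count / size_needed chunks;           -- 3) main body
     epilogue:
       copy count & (epilogue_size_needed - 1) bytes;   -- 4) epilogue  */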
21848 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
21849 rtx expected_align_exp, rtx expected_size_exp)
21855 rtx jump_around_label = NULL;
21856 HOST_WIDE_INT align = 1;
21857 unsigned HOST_WIDE_INT count = 0;
21858 HOST_WIDE_INT expected_size = -1;
21859 int size_needed = 0, epilogue_size_needed;
21860 int desired_align = 0, align_bytes = 0;
21861 enum stringop_alg alg;
21863 bool need_zero_guard = false;
21865 if (CONST_INT_P (align_exp))
21866 align = INTVAL (align_exp);
21867 /* i386 can do misaligned accesses at reasonably increased cost. */
21868 if (CONST_INT_P (expected_align_exp)
21869 && INTVAL (expected_align_exp) > align)
21870 align = INTVAL (expected_align_exp);
21871 /* ALIGN is the minimum of destination and source alignment, but we care here
21872 just about destination alignment. */
21873 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
21874 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
21876 if (CONST_INT_P (count_exp))
21877 count = expected_size = INTVAL (count_exp);
21878 if (CONST_INT_P (expected_size_exp) && count == 0)
21879 expected_size = INTVAL (expected_size_exp);
21881 /* Make sure we don't need to care about overflow later on. */
21882 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
21885 /* Step 0: Decide on preferred algorithm, desired alignment and
21886 size of chunks to be copied by main loop. */
21888 alg = decide_alg (count, expected_size, false, &dynamic_check);
21889 desired_align = decide_alignment (align, alg, expected_size);
21891 if (!TARGET_ALIGN_STRINGOPS)
21892 align = desired_align;
21894 if (alg == libcall)
21896 gcc_assert (alg != no_stringop);
21898 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
21899 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
21900 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
21905 gcc_unreachable ();
21907 need_zero_guard = true;
21908 size_needed = GET_MODE_SIZE (Pmode);
21910 case unrolled_loop:
21911 need_zero_guard = true;
21912 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
21914 case rep_prefix_8_byte:
21917 case rep_prefix_4_byte:
21920 case rep_prefix_1_byte:
21924 need_zero_guard = true;
21929 epilogue_size_needed = size_needed;
21931 /* Step 1: Prologue guard. */
21933 /* Alignment code needs count to be in a register. */
21934 if (CONST_INT_P (count_exp) && desired_align > align)
21936 if (INTVAL (count_exp) > desired_align
21937 && INTVAL (count_exp) > size_needed)
21940 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
21941 if (align_bytes <= 0)
21944 align_bytes = desired_align - align_bytes;
21946 if (align_bytes == 0)
21947 count_exp = force_reg (counter_mode (count_exp), count_exp);
21949 gcc_assert (desired_align >= 1 && align >= 1);
21951 /* Ensure that the alignment prologue won't copy past the end of the block. */
21952 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
21954 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
21955 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
21956 Make sure it is a power of 2. */
21957 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
21961 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
21963 /* If the main algorithm works on QImode, no epilogue is needed.
21964 For small sizes just don't align anything. */
21965 if (size_needed == 1)
21966 desired_align = align;
21973 label = gen_label_rtx ();
21974 emit_cmp_and_jump_insns (count_exp,
21975 GEN_INT (epilogue_size_needed),
21976 LTU, 0, counter_mode (count_exp), 1, label);
21977 if (expected_size == -1 || expected_size < epilogue_size_needed)
21978 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21980 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21984 /* Emit code to decide at runtime whether a library call or inline code should be
21986 if (dynamic_check != -1)
21988 if (CONST_INT_P (count_exp))
21990 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
21992 emit_block_move_via_libcall (dst, src, count_exp, false);
21993 count_exp = const0_rtx;
21999 rtx hot_label = gen_label_rtx ();
22000 jump_around_label = gen_label_rtx ();
22001 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
22002 LEU, 0, GET_MODE (count_exp), 1, hot_label);
22003 predict_jump (REG_BR_PROB_BASE * 90 / 100);
22004 emit_block_move_via_libcall (dst, src, count_exp, false);
22005 emit_jump (jump_around_label);
22006 emit_label (hot_label);
22010 /* Step 2: Alignment prologue. */
22012 if (desired_align > align)
22014 if (align_bytes == 0)
22016 /* Except for the first move in the epilogue, we no longer know
22017 the constant offset in aliasing info. It doesn't seem worth
22018 the pain to maintain it for the first move, so throw away
22020 src = change_address (src, BLKmode, srcreg);
22021 dst = change_address (dst, BLKmode, destreg);
22022 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
22027 /* If we know how many bytes need to be stored before dst is
22028 sufficiently aligned, maintain aliasing info accurately. */
22029 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
22030 desired_align, align_bytes);
22031 count_exp = plus_constant (count_exp, -align_bytes);
22032 count -= align_bytes;
22034 if (need_zero_guard
22035 && (count < (unsigned HOST_WIDE_INT) size_needed
22036 || (align_bytes == 0
22037 && count < ((unsigned HOST_WIDE_INT) size_needed
22038 + desired_align - align))))
      /* It is possible that we copied enough so the main loop will not
	 execute.  */
22042 gcc_assert (size_needed > 1);
22043 if (label == NULL_RTX)
22044 label = gen_label_rtx ();
22045 emit_cmp_and_jump_insns (count_exp,
22046 GEN_INT (size_needed),
22047 LTU, 0, counter_mode (count_exp), 1, label);
22048 if (expected_size == -1
22049 || expected_size < (desired_align - align) / 2 + size_needed)
22050 predict_jump (REG_BR_PROB_BASE * 20 / 100);
22052 predict_jump (REG_BR_PROB_BASE * 60 / 100);
22055 if (label && size_needed == 1)
22057 emit_label (label);
22058 LABEL_NUSES (label) = 1;
22060 epilogue_size_needed = 1;
22062 else if (label == NULL_RTX)
22063 epilogue_size_needed = size_needed;
22065 /* Step 3: Main loop. */
22071 gcc_unreachable ();
22073 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
22074 count_exp, QImode, 1, expected_size);
22077 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
22078 count_exp, Pmode, 1, expected_size);
22080 case unrolled_loop:
22081 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
22082 registers for 4 temporaries anyway. */
22083 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
22084 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
22087 case rep_prefix_8_byte:
22088 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
22091 case rep_prefix_4_byte:
22092 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
22095 case rep_prefix_1_byte:
22096 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
  /* Properly adjust the offsets of the source and destination memory
     for aliasing.  */
22101 if (CONST_INT_P (count_exp))
22103 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
22104 (count / size_needed) * size_needed);
22105 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
22106 (count / size_needed) * size_needed);
22110 src = change_address (src, BLKmode, srcreg);
22111 dst = change_address (dst, BLKmode, destreg);
22114 /* Step 4: Epilogue to copy the remaining bytes. */
  /* When the main loop is done, COUNT_EXP might hold the original count,
     while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
     Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
     bytes.  Compensate if needed.  */
22123 if (size_needed < epilogue_size_needed)
22126 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
22127 GEN_INT (size_needed - 1), count_exp, 1,
22129 if (tmp != count_exp)
22130 emit_move_insn (count_exp, tmp);
22132 emit_label (label);
22133 LABEL_NUSES (label) = 1;
22136 if (count_exp != const0_rtx && epilogue_size_needed > 1)
22137 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
22138 epilogue_size_needed);
22139 if (jump_around_label)
22140 emit_label (jump_around_label);
/* Helper function for memset.  For the QImode value 0xXY produce
   0xXYXYXYXY of the width specified by MODE.  This is essentially
   a * 0x01010101, but we can do slightly better than synth_mult
   by unwinding the sequence by hand on CPUs with slow multiply.  */
static rtx
22150 promote_duplicated_reg (enum machine_mode mode, rtx val)
22152 enum machine_mode valmode = GET_MODE (val);
22154 int nops = mode == DImode ? 3 : 2;
22156 gcc_assert (mode == SImode || mode == DImode);
22157 if (val == const0_rtx)
22158 return copy_to_mode_reg (mode, const0_rtx);
22159 if (CONST_INT_P (val))
22161 HOST_WIDE_INT v = INTVAL (val) & 255;
22165 if (mode == DImode)
22166 v |= (v << 16) << 16;
22167 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
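      /* Worked example (added comment): for VAL == 0xAB the duplication
	 steps build 0xAB -> 0xABAB -> 0xABABABAB, and for DImode the final
	 step yields 0xABABABABABABABAB.  That step is written as
	 (v << 16) << 16 rather than v << 32, presumably so it stays well
	 defined even if HOST_WIDE_INT is only 32 bits wide.  */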
  if (valmode == VOIDmode)
    valmode = QImode;
22172 if (valmode != QImode)
22173 val = gen_lowpart (QImode, val);
  if (mode == QImode)
    return val;
22176 if (!TARGET_PARTIAL_REG_STALL)
22178 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
22179 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
22180 <= (ix86_cost->shift_const + ix86_cost->add) * nops
22181 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
22183 rtx reg = convert_modes (mode, QImode, val, true);
22184 tmp = promote_duplicated_reg (mode, const1_rtx);
22185 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
22190 rtx reg = convert_modes (mode, QImode, val, true);
22192 if (!TARGET_PARTIAL_REG_STALL)
22193 if (mode == SImode)
22194 emit_insn (gen_movsi_insv_1 (reg, reg));
22196 emit_insn (gen_movdi_insv_1 (reg, reg));
22199 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
22200 NULL, 1, OPTAB_DIRECT);
22202 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
22204 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
22205 NULL, 1, OPTAB_DIRECT);
22206 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
      if (mode == SImode)
	return reg;
22209 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
22210 NULL, 1, OPTAB_DIRECT);
22211 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
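      /* Illustrative trace (added comment): starting from reg == 0x000000AB,
	 the ladder above computes
	   reg |= reg << 8;   -> 0x0000ABAB
	   reg |= reg << 16;  -> 0xABABABAB
	 and, for DImode only,
	   reg |= reg << 32;  -> 0xABABABABABABABAB.  */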
/* Duplicate the value VAL using promote_duplicated_reg into the maximal
   size that will be needed by the main loop copying SIZE_NEEDED chunks
   and by the prologue getting the alignment from ALIGN up to
   DESIRED_ALIGN.  */
22220 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
  if (TARGET_64BIT
      && (size_needed > 4 || (desired_align > align && desired_align > 4)))
22226 promoted_val = promote_duplicated_reg (DImode, val);
22227 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
22228 promoted_val = promote_duplicated_reg (SImode, val);
22229 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
22230 promoted_val = promote_duplicated_reg (HImode, val);
22232 promoted_val = val;
22234 return promoted_val;
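/* For instance (illustrative, not from the original source): on 64-bit
   targets a main loop with size_needed == 8 selects a DImode promotion,
   while size_needed == 2 with no extra alignment work only needs HImode.  */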
/* Expand string set operation (memset).  Use i386 string operations
   when profitable.  See the expand_movmem comment for an explanation
   of the individual steps performed.  */
22241 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
22242 rtx expected_align_exp, rtx expected_size_exp)
22247 rtx jump_around_label = NULL;
22248 HOST_WIDE_INT align = 1;
22249 unsigned HOST_WIDE_INT count = 0;
22250 HOST_WIDE_INT expected_size = -1;
22251 int size_needed = 0, epilogue_size_needed;
22252 int desired_align = 0, align_bytes = 0;
22253 enum stringop_alg alg;
22254 rtx promoted_val = NULL;
22255 bool force_loopy_epilogue = false;
22257 bool need_zero_guard = false;
22259 if (CONST_INT_P (align_exp))
22260 align = INTVAL (align_exp);
  /* i386 can do misaligned access at a reasonably increased cost.  */
22262 if (CONST_INT_P (expected_align_exp)
22263 && INTVAL (expected_align_exp) > align)
22264 align = INTVAL (expected_align_exp);
22265 if (CONST_INT_P (count_exp))
22266 count = expected_size = INTVAL (count_exp);
22267 if (CONST_INT_P (expected_size_exp) && count == 0)
22268 expected_size = INTVAL (expected_size_exp);
22270 /* Make sure we don't need to care about overflow later on. */
22271 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
22274 /* Step 0: Decide on preferred algorithm, desired alignment and
22275 size of chunks to be copied by main loop. */
22277 alg = decide_alg (count, expected_size, true, &dynamic_check);
22278 desired_align = decide_alignment (align, alg, expected_size);
22280 if (!TARGET_ALIGN_STRINGOPS)
22281 align = desired_align;
  if (alg == libcall)
    return false;
22285 gcc_assert (alg != no_stringop);
22287 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
22288 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
22293 gcc_unreachable ();
22295 need_zero_guard = true;
22296 size_needed = GET_MODE_SIZE (Pmode);
22298 case unrolled_loop:
22299 need_zero_guard = true;
22300 size_needed = GET_MODE_SIZE (Pmode) * 4;
22302 case rep_prefix_8_byte:
22305 case rep_prefix_4_byte:
22308 case rep_prefix_1_byte:
22312 need_zero_guard = true;
22316 epilogue_size_needed = size_needed;
22318 /* Step 1: Prologue guard. */
  /* Alignment code needs count to be in a register.  */
22321 if (CONST_INT_P (count_exp) && desired_align > align)
22323 if (INTVAL (count_exp) > desired_align
22324 && INTVAL (count_exp) > size_needed)
	  align_bytes
	    = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
	  if (align_bytes <= 0)
	    align_bytes = 0;
	  else
	    align_bytes = desired_align - align_bytes;
22333 if (align_bytes == 0)
22335 enum machine_mode mode = SImode;
	  if (TARGET_64BIT && (count & ~0xffffffff))
	    mode = DImode;
22338 count_exp = force_reg (mode, count_exp);
  /* Do the cheap promotion to allow better CSE across the
     main loop and epilogue (i.e. one load of the big constant at the
     front of all the code).  */
22344 if (CONST_INT_P (val_exp))
22345 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
22346 desired_align, align);
22347 /* Ensure that alignment prologue won't copy past end of block. */
22348 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
22350 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
      /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
	 Make sure it is a power of 2.  */
22353 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
      /* To improve performance of small blocks, we jump around the VAL
	 promoting code.  This means that if the promoted VAL is not a
	 constant, we might not use it in the epilogue and have to use
	 the byte loop variant.  */
22359 if (epilogue_size_needed > 2 && !promoted_val)
22360 force_loopy_epilogue = true;
22363 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
22365 /* If main algorithm works on QImode, no epilogue is needed.
22366 For small sizes just don't align anything. */
22367 if (size_needed == 1)
22368 desired_align = align;
22375 label = gen_label_rtx ();
22376 emit_cmp_and_jump_insns (count_exp,
22377 GEN_INT (epilogue_size_needed),
22378 LTU, 0, counter_mode (count_exp), 1, label);
22379 if (expected_size == -1 || expected_size <= epilogue_size_needed)
22380 predict_jump (REG_BR_PROB_BASE * 60 / 100);
22382 predict_jump (REG_BR_PROB_BASE * 20 / 100);
22385 if (dynamic_check != -1)
22387 rtx hot_label = gen_label_rtx ();
22388 jump_around_label = gen_label_rtx ();
22389 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
22390 LEU, 0, counter_mode (count_exp), 1, hot_label);
22391 predict_jump (REG_BR_PROB_BASE * 90 / 100);
22392 set_storage_via_libcall (dst, count_exp, val_exp, false);
22393 emit_jump (jump_around_label);
22394 emit_label (hot_label);
22397 /* Step 2: Alignment prologue. */
  /* Do the expensive promotion once we have branched off the small
     blocks.  */
22401 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
22402 desired_align, align);
22403 gcc_assert (desired_align >= 1 && align >= 1);
22405 if (desired_align > align)
22407 if (align_bytes == 0)
	  /* Except for the first move in the epilogue, we no longer know
	     the constant offset in the aliasing info.  It doesn't seem worth
	     the pain to maintain it for the first move, so throw away
	     the info early.  */
22413 dst = change_address (dst, BLKmode, destreg);
22414 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
22419 /* If we know how many bytes need to be stored before dst is
22420 sufficiently aligned, maintain aliasing info accurately. */
22421 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
22422 desired_align, align_bytes);
22423 count_exp = plus_constant (count_exp, -align_bytes);
22424 count -= align_bytes;
22426 if (need_zero_guard
22427 && (count < (unsigned HOST_WIDE_INT) size_needed
22428 || (align_bytes == 0
22429 && count < ((unsigned HOST_WIDE_INT) size_needed
22430 + desired_align - align))))
      /* It is possible that we copied enough so the main loop will not
	 execute.  */
22434 gcc_assert (size_needed > 1);
22435 if (label == NULL_RTX)
22436 label = gen_label_rtx ();
22437 emit_cmp_and_jump_insns (count_exp,
22438 GEN_INT (size_needed),
22439 LTU, 0, counter_mode (count_exp), 1, label);
22440 if (expected_size == -1
22441 || expected_size < (desired_align - align) / 2 + size_needed)
22442 predict_jump (REG_BR_PROB_BASE * 20 / 100);
22444 predict_jump (REG_BR_PROB_BASE * 60 / 100);
22447 if (label && size_needed == 1)
22449 emit_label (label);
22450 LABEL_NUSES (label) = 1;
22452 promoted_val = val_exp;
22453 epilogue_size_needed = 1;
22455 else if (label == NULL_RTX)
22456 epilogue_size_needed = size_needed;
22458 /* Step 3: Main loop. */
22464 gcc_unreachable ();
22466 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
22467 count_exp, QImode, 1, expected_size);
22470 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
22471 count_exp, Pmode, 1, expected_size);
22473 case unrolled_loop:
22474 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
22475 count_exp, Pmode, 4, expected_size);
22477 case rep_prefix_8_byte:
22478 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
22481 case rep_prefix_4_byte:
22482 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
22485 case rep_prefix_1_byte:
22486 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
  /* Properly adjust the offset of the destination memory for aliasing.  */
22491 if (CONST_INT_P (count_exp))
22492 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
22493 (count / size_needed) * size_needed);
22495 dst = change_address (dst, BLKmode, destreg);
22497 /* Step 4: Epilogue to copy the remaining bytes. */
  /* When the main loop is done, COUNT_EXP might hold the original count,
     while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
     Epilogue code will actually set COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
     bytes.  Compensate if needed.  */
22506 if (size_needed < epilogue_size_needed)
22509 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
22510 GEN_INT (size_needed - 1), count_exp, 1,
22512 if (tmp != count_exp)
22513 emit_move_insn (count_exp, tmp);
22515 emit_label (label);
22516 LABEL_NUSES (label) = 1;
22519 if (count_exp != const0_rtx && epilogue_size_needed > 1)
22521 if (force_loopy_epilogue)
22522 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
22523 epilogue_size_needed);
22525 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
22526 epilogue_size_needed);
22528 if (jump_around_label)
22529 emit_label (jump_around_label);
/* Expand the appropriate insns for doing strlen if not just doing
   repnz; scasb
22536 out = result, initialized with the start address
22537 align_rtx = alignment of the address.
   scratch = scratch register, initialized with the start address when
	     not aligned, otherwise undefined
22541 This is just the body. It needs the initializations mentioned above and
22542 some address computing at the end. These things are done in i386.md. */
22545 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
22549 rtx align_2_label = NULL_RTX;
22550 rtx align_3_label = NULL_RTX;
22551 rtx align_4_label = gen_label_rtx ();
22552 rtx end_0_label = gen_label_rtx ();
22554 rtx tmpreg = gen_reg_rtx (SImode);
22555 rtx scratch = gen_reg_rtx (SImode);
22559 if (CONST_INT_P (align_rtx))
22560 align = INTVAL (align_rtx);
22562 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
22564 /* Is there a known alignment and is it less than 4? */
22567 rtx scratch1 = gen_reg_rtx (Pmode);
22568 emit_move_insn (scratch1, out);
22569 /* Is there a known alignment and is it not 2? */
	  align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte boundary.  */
	  align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte boundary.  */
22575 /* Leave just the 3 lower bits. */
22576 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
22577 NULL_RTX, 0, OPTAB_WIDEN);
22579 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
22580 Pmode, 1, align_4_label);
22581 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
22582 Pmode, 1, align_2_label);
22583 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
22584 Pmode, 1, align_3_label);
	  /* Since the alignment is 2, we have to check 2 or 0 bytes;
	     check whether it is aligned to a 4-byte boundary.  */
22591 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
22592 NULL_RTX, 0, OPTAB_WIDEN);
22594 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
22595 Pmode, 1, align_4_label);
22598 mem = change_address (src, QImode, out);
22600 /* Now compare the bytes. */
22602 /* Compare the first n unaligned byte on a byte per byte basis. */
22603 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
22604 QImode, 1, end_0_label);
22606 /* Increment the address. */
22607 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
22609 /* Not needed with an alignment of 2 */
22612 emit_label (align_2_label);
22614 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
22617 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
22619 emit_label (align_3_label);
22622 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
22625 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
  /* Generate a loop to check 4 bytes at a time.  It is not a good idea
     to align this loop: it only makes the program bigger and does not
     help it to run faster.  */
22631 emit_label (align_4_label);
22633 mem = change_address (src, SImode, out);
22634 emit_move_insn (scratch, mem);
22635 emit_insn (ix86_gen_add3 (out, out, GEN_INT (4)));
  /* This formula yields a nonzero result iff one of the bytes is zero.
     This saves three branches inside the loop and many cycles.  */
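  /* A minimal C sketch of the same test (added for illustration; the
     helper below is hypothetical and not part of GCC):

	 static int
	 has_zero_byte (unsigned int x)
	 {
	   return ((x - 0x01010101U) & ~x & 0x80808080U) != 0;
	 }

     For x == 0x61006263 (one zero byte), x - 0x01010101 == 0x5FFF6162
     and ~x == 0x9EFF9D9C; ANDing the two and masking with 0x80808080
     leaves 0x00800000, nonzero precisely because x contains a zero
     byte.  */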
22640 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
22641 emit_insn (gen_one_cmplsi2 (scratch, scratch));
22642 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
22643 emit_insn (gen_andsi3 (tmpreg, tmpreg,
22644 gen_int_mode (0x80808080, SImode)));
22645 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
22650 rtx reg = gen_reg_rtx (SImode);
22651 rtx reg2 = gen_reg_rtx (Pmode);
22652 emit_move_insn (reg, tmpreg);
22653 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
22655 /* If zero is not in the first two bytes, move two bytes forward. */
22656 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
22657 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
22658 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
22659 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
22660 gen_rtx_IF_THEN_ELSE (SImode, tmp,
	  /* Emit the lea manually to avoid clobbering the flags, which the
	     following conditional moves still need.  */
22664 emit_insn (gen_rtx_SET (SImode, reg2,
22665 gen_rtx_PLUS (Pmode, out, const2_rtx)));
22667 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
22668 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
22669 emit_insn (gen_rtx_SET (VOIDmode, out,
22670 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
22676 rtx end_2_label = gen_label_rtx ();
22677 /* Is zero in the first two bytes? */
22679 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
22680 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
22681 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
22682 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
22683 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
22685 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
22686 JUMP_LABEL (tmp) = end_2_label;
22688 /* Not in the first two. Move two bytes forward. */
22689 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
22690 emit_insn (ix86_gen_add3 (out, out, const2_rtx));
22692 emit_label (end_2_label);
  /* Avoid a branch when fixing up the final byte position.  */
22697 tmpreg = gen_lowpart (QImode, tmpreg);
22698 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
22699 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
22700 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
22701 emit_insn (ix86_gen_sub3_carry (out, out, GEN_INT (3), tmp, cmp));
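  /* Added note: at this point OUT is 4 past the word just loaded (plus 2
     more if the zero was in the upper half).  Doubling the low byte of
     TMPREG moves its 0x80 marker into the carry flag, so OUT - 3 - carry
     lands exactly on the zero byte: e.g. a zero in byte 0 sets the carry
     and OUT - 4 points back at the word start.  */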
22703 emit_label (end_0_label);
22706 /* Expand strlen. */
22709 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
22711 rtx addr, scratch1, scratch2, scratch3, scratch4;
  /* The generic case of the strlen expander is long.  Avoid expanding
     it unless TARGET_INLINE_ALL_STRINGOPS.  */
22716 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
22717 && !TARGET_INLINE_ALL_STRINGOPS
22718 && !optimize_insn_for_size_p ()
22719 && (!CONST_INT_P (align) || INTVAL (align) < 4))
22722 addr = force_reg (Pmode, XEXP (src, 0));
22723 scratch1 = gen_reg_rtx (Pmode);
22725 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
22726 && !optimize_insn_for_size_p ())
      /* Well, it seems that some optimizer does not combine a call like
	 foo (strlen (bar), strlen (bar));
	 when the move and the subtraction are done here.  It does calculate
	 the length just once when these instructions are done inside
	 output_strlen_unroll ().  But I think that since &bar[strlen (bar)]
	 is often used, and I use one fewer register for the lifetime of
	 output_strlen_unroll (), this is better.  */
22736 emit_move_insn (out, addr);
22738 ix86_expand_strlensi_unroll_1 (out, src, align);
22740 /* strlensi_unroll_1 returns the address of the zero at the end of
22741 the string, like memchr(), so compute the length by subtracting
22742 the start address. */
22743 emit_insn (ix86_gen_sub3 (out, out, addr));
22749 /* Can't use this if the user has appropriated eax, ecx, or edi. */
22750 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
22753 scratch2 = gen_reg_rtx (Pmode);
22754 scratch3 = gen_reg_rtx (Pmode);
22755 scratch4 = force_reg (Pmode, constm1_rtx);
22757 emit_move_insn (scratch3, addr);
22758 eoschar = force_reg (QImode, eoschar);
22760 src = replace_equiv_address_nv (src, scratch3);
22762 /* If .md starts supporting :P, this can be done in .md. */
22763 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
22764 scratch4), UNSPEC_SCAS);
22765 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
22766 emit_insn (ix86_gen_one_cmpl2 (scratch2, scratch1));
22767 emit_insn (ix86_gen_add3 (out, scratch2, constm1_rtx));
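  /* Worked example (added for clarity): strlenqi_1 leaves in SCRATCH1 the
     count register after "repnz scasb", seeded from SCRATCH4 == -1 above,
     so for "ab\0" it examines 3 bytes and SCRATCH1 == -4.  Since ~x ==
     -x - 1, the two insns above compute ~(-4) + (-1) == 3 - 1 == 2, the
     string length.  */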
/* For a given symbol (function), construct code to compute the address
   of its PLT entry in the large x86-64 PIC model.  */
22775 construct_plt_address (rtx symbol)
22777 rtx tmp = gen_reg_rtx (Pmode);
22778 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
22780 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
22781 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
22783 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
22784 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
22789 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
22791 rtx pop, bool sibcall)
  /* We need to represent that SI and DI registers are clobbered
     by SYSV calls.  */
22795 static int clobbered_registers[] = {
22796 XMM6_REG, XMM7_REG, XMM8_REG,
22797 XMM9_REG, XMM10_REG, XMM11_REG,
22798 XMM12_REG, XMM13_REG, XMM14_REG,
22799 XMM15_REG, SI_REG, DI_REG
22801 rtx vec[ARRAY_SIZE (clobbered_registers) + 3];
22802 rtx use = NULL, call;
22803 unsigned int vec_len;
  if (pop == const0_rtx)
    pop = NULL;
22807 gcc_assert (!TARGET_64BIT || !pop);
22809 if (TARGET_MACHO && !TARGET_64BIT)
22812 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
22813 fnaddr = machopic_indirect_call_target (fnaddr);
22818 /* Static functions and indirect calls don't need the pic register. */
22819 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
22820 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
22821 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
22822 use_reg (&use, pic_offset_table_rtx);
22825 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
22827 rtx al = gen_rtx_REG (QImode, AX_REG);
22828 emit_move_insn (al, callarg2);
22829 use_reg (&use, al);
22832 if (ix86_cmodel == CM_LARGE_PIC
22834 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
22835 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
22836 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
  if (sibcall
      ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
      : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
22841 fnaddr = XEXP (fnaddr, 0);
22842 if (GET_MODE (fnaddr) != Pmode)
22843 fnaddr = convert_to_mode (Pmode, fnaddr, 1);
22844 fnaddr = gen_rtx_MEM (QImode, copy_to_mode_reg (Pmode, fnaddr));
22848 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
22850 call = gen_rtx_SET (VOIDmode, retval, call);
22851 vec[vec_len++] = call;
22855 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
22856 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
22857 vec[vec_len++] = pop;
22860 if (TARGET_64BIT_MS_ABI
22861 && (!callarg2 || INTVAL (callarg2) != -2))
22865 vec[vec_len++] = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
22866 UNSPEC_MS_TO_SYSV_CALL);
22868 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
	vec[vec_len++]
	  = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
			     ? TImode : DImode,
			     gen_rtx_REG (SSE_REGNO_P (clobbered_registers[i])
					  ? TImode : DImode,
					  clobbered_registers[i]));
22877 /* Add UNSPEC_CALL_NEEDS_VZEROUPPER decoration. */
22878 if (TARGET_VZEROUPPER)
22881 if (cfun->machine->callee_pass_avx256_p)
22883 if (cfun->machine->callee_return_avx256_p)
22884 avx256 = callee_return_pass_avx256;
22886 avx256 = callee_pass_avx256;
22888 else if (cfun->machine->callee_return_avx256_p)
22889 avx256 = callee_return_avx256;
22891 avx256 = call_no_avx256;
22893 if (reload_completed)
22894 emit_insn (gen_avx_vzeroupper (GEN_INT (avx256)));
22896 vec[vec_len++] = gen_rtx_UNSPEC (VOIDmode,
22897 gen_rtvec (1, GEN_INT (avx256)),
22898 UNSPEC_CALL_NEEDS_VZEROUPPER);
22902 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (vec_len, vec));
22903 call = emit_call_insn (call);
22905 CALL_INSN_FUNCTION_USAGE (call) = use;
22911 ix86_split_call_vzeroupper (rtx insn, rtx vzeroupper)
22913 rtx pat = PATTERN (insn);
22914 rtvec vec = XVEC (pat, 0);
22915 int len = GET_NUM_ELEM (vec) - 1;
22917 /* Strip off the last entry of the parallel. */
22918 gcc_assert (GET_CODE (RTVEC_ELT (vec, len)) == UNSPEC);
22919 gcc_assert (XINT (RTVEC_ELT (vec, len), 1) == UNSPEC_CALL_NEEDS_VZEROUPPER);
22921 pat = RTVEC_ELT (vec, 0);
22923 pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (len, &RTVEC_ELT (vec, 0)));
22925 emit_insn (gen_avx_vzeroupper (vzeroupper));
22926 emit_call_insn (pat);
22929 /* Output the assembly for a call instruction. */
22932 ix86_output_call_insn (rtx insn, rtx call_op)
22934 bool direct_p = constant_call_address_operand (call_op, Pmode);
22935 bool seh_nop_p = false;
22938 if (SIBLING_CALL_P (insn))
22942 /* SEH epilogue detection requires the indirect branch case
22943 to include REX.W. */
22944 else if (TARGET_SEH)
22945 xasm = "rex.W jmp %A0";
22949 output_asm_insn (xasm, &call_op);
22953 /* SEH unwinding can require an extra nop to be emitted in several
22954 circumstances. Determine if we have one of those. */
22959 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
22961 /* If we get to another real insn, we don't need the nop. */
22965 /* If we get to the epilogue note, prevent a catch region from
22966 being adjacent to the standard epilogue sequence. If non-
22967 call-exceptions, we'll have done this during epilogue emission. */
22968 if (NOTE_P (i) && NOTE_KIND (i) == NOTE_INSN_EPILOGUE_BEG
22969 && !flag_non_call_exceptions
22970 && !can_throw_internal (insn))
22977 /* If we didn't find a real insn following the call, prevent the
22978 unwinder from looking into the next function. */
22984 xasm = "call\t%P0";
22986 xasm = "call\t%A0";
22988 output_asm_insn (xasm, &call_op);
/* Clear stack slot assignments remembered from previous functions.
   This is called from INIT_EXPANDERS once before RTL is emitted for each
   function.  */
23000 static struct machine_function *
23001 ix86_init_machine_status (void)
23003 struct machine_function *f;
23005 f = ggc_alloc_cleared_machine_function ();
23006 f->use_fast_prologue_epilogue_nregs = -1;
23007 f->tls_descriptor_call_expanded_p = 0;
23008 f->call_abi = ix86_abi;
23013 /* Return a MEM corresponding to a stack slot with mode MODE.
23014 Allocate a new slot if necessary.
23016 The RTL for a function can have several slots available: N is
23017 which slot to use. */
23020 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
23022 struct stack_local_entry *s;
23024 gcc_assert (n < MAX_386_STACK_LOCALS);
23026 /* Virtual slot is valid only before vregs are instantiated. */
23027 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
23029 for (s = ix86_stack_locals; s; s = s->next)
23030 if (s->mode == mode && s->n == n)
23031 return validize_mem (copy_rtx (s->rtl));
23033 s = ggc_alloc_stack_local_entry ();
23036 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
23038 s->next = ix86_stack_locals;
23039 ix86_stack_locals = s;
23040 return validize_mem (s->rtl);
23043 /* Calculate the length of the memory address in the instruction encoding.
23044 Includes addr32 prefix, does not include the one-byte modrm, opcode,
23045 or other prefixes. */
23048 memory_address_length (rtx addr)
23050 struct ix86_address parts;
23051 rtx base, index, disp;
23055 if (GET_CODE (addr) == PRE_DEC
23056 || GET_CODE (addr) == POST_INC
23057 || GET_CODE (addr) == PRE_MODIFY
23058 || GET_CODE (addr) == POST_MODIFY)
23061 ok = ix86_decompose_address (addr, &parts);
23064 if (parts.base && GET_CODE (parts.base) == SUBREG)
23065 parts.base = SUBREG_REG (parts.base);
23066 if (parts.index && GET_CODE (parts.index) == SUBREG)
23067 parts.index = SUBREG_REG (parts.index);
23070 index = parts.index;
23073 /* Add length of addr32 prefix. */
23074 len = (GET_CODE (addr) == ZERO_EXTEND
23075 || GET_CODE (addr) == AND);
  /* Rule of thumb:
       - esp as the base always wants an index,
       - ebp as the base always wants a displacement,
       - r12 as the base always wants an index,
       - r13 as the base always wants a displacement.  */
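  /* Added background (standard x86 encoding, for illustration): in the
     modrm byte, r/m == 4 (esp/r12) means "a SIB byte follows", so plain
     (%esp) needs the SIB form, and mod == 00 with r/m == 5 (ebp/r13)
     means disp32 with no base, so plain (%ebp) must be encoded as
     0(%ebp) with a disp8.  */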
23083 /* Register Indirect. */
23084 if (base && !index && !disp)
      /* esp (for its index) and ebp (for its displacement) need
	 the two-byte modrm form.  Similarly for r12 and r13 in 64-bit
	 mode.  */
      if (REG_P (addr)
23090 && (addr == arg_pointer_rtx
23091 || addr == frame_pointer_rtx
23092 || REGNO (addr) == SP_REG
23093 || REGNO (addr) == BP_REG
23094 || REGNO (addr) == R12_REG
23095 || REGNO (addr) == R13_REG))
  /* Direct Addressing.  In 64-bit mode mod 00 r/m 5
     is not disp32, but disp32(%rip), so for disp32 a
     SIB byte is needed, unless print_operand_address
     optimizes it into disp32(%rip) or (%rip) is implied
     by UNSPEC.  */
23104 else if (disp && !base && !index)
23111 if (GET_CODE (disp) == CONST)
23112 symbol = XEXP (disp, 0);
23113 if (GET_CODE (symbol) == PLUS
23114 && CONST_INT_P (XEXP (symbol, 1)))
23115 symbol = XEXP (symbol, 0);
23117 if (GET_CODE (symbol) != LABEL_REF
23118 && (GET_CODE (symbol) != SYMBOL_REF
23119 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
23120 && (GET_CODE (symbol) != UNSPEC
23121 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
23122 && XINT (symbol, 1) != UNSPEC_PCREL
23123 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
23130 /* Find the length of the displacement constant. */
23133 if (base && satisfies_constraint_K (disp))
23138 /* ebp always wants a displacement. Similarly r13. */
23139 else if (base && REG_P (base)
23140 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
23143 /* An index requires the two-byte modrm form.... */
23145 /* ...like esp (or r12), which always wants an index. */
23146 || base == arg_pointer_rtx
23147 || base == frame_pointer_rtx
23148 || (base && REG_P (base)
23149 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
/* Compute the default value for the "length_immediate" attribute.  When
   SHORTFORM is set, expect that the insn has an 8-bit immediate
   alternative.  */
23169 ix86_attr_length_immediate_default (rtx insn, bool shortform)
23173 extract_insn_cached (insn);
23174 for (i = recog_data.n_operands - 1; i >= 0; --i)
23175 if (CONSTANT_P (recog_data.operand[i]))
23177 enum attr_mode mode = get_attr_mode (insn);
23180 if (shortform && CONST_INT_P (recog_data.operand[i]))
23182 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
23189 ival = trunc_int_for_mode (ival, HImode);
23192 ival = trunc_int_for_mode (ival, SImode);
23197 if (IN_RANGE (ival, -128, 127))
	  /* Immediates for DImode instructions are encoded as 32-bit
	     sign-extended values.  */
23219 fatal_insn ("unknown insn mode", insn);
23224 /* Compute default value for "length_address" attribute. */
23226 ix86_attr_length_address_default (rtx insn)
23230 if (get_attr_type (insn) == TYPE_LEA)
23232 rtx set = PATTERN (insn), addr;
23234 if (GET_CODE (set) == PARALLEL)
23235 set = XVECEXP (set, 0, 0);
23237 gcc_assert (GET_CODE (set) == SET);
23239 addr = SET_SRC (set);
23240 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
23242 if (GET_CODE (addr) == ZERO_EXTEND)
23243 addr = XEXP (addr, 0);
23244 if (GET_CODE (addr) == SUBREG)
23245 addr = SUBREG_REG (addr);
23248 return memory_address_length (addr);
23251 extract_insn_cached (insn);
23252 for (i = recog_data.n_operands - 1; i >= 0; --i)
23253 if (MEM_P (recog_data.operand[i]))
23255 constrain_operands_cached (reload_completed);
23256 if (which_alternative != -1)
23258 const char *constraints = recog_data.constraints[i];
23259 int alt = which_alternative;
23261 while (*constraints == '=' || *constraints == '+')
23264 while (*constraints++ != ',')
23266 /* Skip ignored operands. */
23267 if (*constraints == 'X')
23270 return memory_address_length (XEXP (recog_data.operand[i], 0));
/* Compute the default value for the "length_vex" attribute.  It includes
   the 2- or 3-byte VEX prefix and 1 opcode byte.  */
23279 ix86_attr_length_vex_default (rtx insn, bool has_0f_opcode, bool has_vex_w)
  /* Only the 0f escape can use the 2-byte VEX prefix, and the VEX.W
     bit requires the 3-byte VEX prefix.  */
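  /* Added example (standard VEX encoding): vaddps %xmm1, %xmm2, %xmm0
     (a plain 0f-map insn using low registers) fits the 2-byte C5 prefix,
     while anything needing VEX.W, the 0f38/0f3a maps, or the REX.X/REX.B
     extension bits must use the 3-byte C4 prefix.  */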
23285 if (!has_0f_opcode || has_vex_w)
  /* We can always use the 2-byte VEX prefix in 32-bit mode.  */
23292 extract_insn_cached (insn);
23294 for (i = recog_data.n_operands - 1; i >= 0; --i)
23295 if (REG_P (recog_data.operand[i]))
      /* The REX.W bit requires the 3-byte VEX prefix.  */
23298 if (GET_MODE (recog_data.operand[i]) == DImode
23299 && GENERAL_REG_P (recog_data.operand[i]))
      /* The REX.X or REX.B bits require the 3-byte VEX prefix.  */
23305 if (MEM_P (recog_data.operand[i])
23306 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
/* Return the maximum number of instructions a CPU can issue.  */
23316 ix86_issue_rate (void)
23320 case PROCESSOR_PENTIUM:
23321 case PROCESSOR_ATOM:
23325 case PROCESSOR_PENTIUMPRO:
23326 case PROCESSOR_PENTIUM4:
23327 case PROCESSOR_CORE2_32:
23328 case PROCESSOR_CORE2_64:
23329 case PROCESSOR_COREI7_32:
23330 case PROCESSOR_COREI7_64:
23331 case PROCESSOR_ATHLON:
23333 case PROCESSOR_AMDFAM10:
23334 case PROCESSOR_NOCONA:
23335 case PROCESSOR_GENERIC32:
23336 case PROCESSOR_GENERIC64:
23337 case PROCESSOR_BDVER1:
23338 case PROCESSOR_BDVER2:
23339 case PROCESSOR_BTVER1:
/* A subroutine of ix86_adjust_cost -- return TRUE iff INSN reads the flags
   set by DEP_INSN and nothing else set by DEP_INSN.  */
23351 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
23355 /* Simplify the test for uninteresting insns. */
23356 if (insn_type != TYPE_SETCC
23357 && insn_type != TYPE_ICMOV
23358 && insn_type != TYPE_FCMOV
23359 && insn_type != TYPE_IBR)
23362 if ((set = single_set (dep_insn)) != 0)
23364 set = SET_DEST (set);
23367 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
23368 && XVECLEN (PATTERN (dep_insn), 0) == 2
23369 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
23370 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
23372 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
      set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
23378 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
23381 /* This test is true if the dependent insn reads the flags but
23382 not any other potentially set register. */
23383 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
23386 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
/* Return true iff USE_INSN has a memory address with operands set by
   SET_INSN.  */

static bool
23396 ix86_agi_dependent (rtx set_insn, rtx use_insn)
23399 extract_insn_cached (use_insn);
23400 for (i = recog_data.n_operands - 1; i >= 0; --i)
23401 if (MEM_P (recog_data.operand[i]))
23403 rtx addr = XEXP (recog_data.operand[i], 0);
23404 return modified_in_p (addr, set_insn) != 0;
23410 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
23412 enum attr_type insn_type, dep_insn_type;
23413 enum attr_memory memory;
23415 int dep_insn_code_number;
23417 /* Anti and output dependencies have zero cost on all CPUs. */
23418 if (REG_NOTE_KIND (link) != 0)
23421 dep_insn_code_number = recog_memoized (dep_insn);
23423 /* If we can't recognize the insns, we can't really do anything. */
23424 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
23427 insn_type = get_attr_type (insn);
23428 dep_insn_type = get_attr_type (dep_insn);
23432 case PROCESSOR_PENTIUM:
23433 /* Address Generation Interlock adds a cycle of latency. */
23434 if (insn_type == TYPE_LEA)
23436 rtx addr = PATTERN (insn);
23438 if (GET_CODE (addr) == PARALLEL)
23439 addr = XVECEXP (addr, 0, 0);
23441 gcc_assert (GET_CODE (addr) == SET);
23443 addr = SET_SRC (addr);
23444 if (modified_in_p (addr, dep_insn))
23447 else if (ix86_agi_dependent (dep_insn, insn))
23450 /* ??? Compares pair with jump/setcc. */
23451 if (ix86_flags_dependent (insn, dep_insn, insn_type))
      /* Floating point stores require the value to be ready one cycle
	 earlier.  */
23455 if (insn_type == TYPE_FMOV
23456 && get_attr_memory (insn) == MEMORY_STORE
23457 && !ix86_agi_dependent (dep_insn, insn))
23461 case PROCESSOR_PENTIUMPRO:
23462 memory = get_attr_memory (insn);
23464 /* INT->FP conversion is expensive. */
23465 if (get_attr_fp_int_src (dep_insn))
      /* There is one cycle of extra latency between an FP op and a store.  */
23469 if (insn_type == TYPE_FMOV
23470 && (set = single_set (dep_insn)) != NULL_RTX
23471 && (set2 = single_set (insn)) != NULL_RTX
23472 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
23473 && MEM_P (SET_DEST (set2)))
      /* Model the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction,
	 when the previous instruction is not needed to compute the
	 address.  */
23479 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
23480 && !ix86_agi_dependent (dep_insn, insn))
	  /* Claim moves to take one cycle, as the core can issue one load
	     at a time and the next load can start a cycle later.  */
23484 if (dep_insn_type == TYPE_IMOV
23485 || dep_insn_type == TYPE_FMOV)
23493 memory = get_attr_memory (insn);
      /* The esp dependency is resolved before the instruction is really
	 finished.  */
23497 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
23498 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
23501 /* INT->FP conversion is expensive. */
23502 if (get_attr_fp_int_src (dep_insn))
      /* Model the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction,
	 when the previous instruction is not needed to compute the
	 address.  */
23508 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
23509 && !ix86_agi_dependent (dep_insn, insn))
	  /* Claim moves to take one cycle, as the core can issue one load
	     at a time and the next load can start a cycle later.  */
23513 if (dep_insn_type == TYPE_IMOV
23514 || dep_insn_type == TYPE_FMOV)
23523 case PROCESSOR_ATHLON:
23525 case PROCESSOR_AMDFAM10:
23526 case PROCESSOR_BDVER1:
23527 case PROCESSOR_BDVER2:
23528 case PROCESSOR_BTVER1:
23529 case PROCESSOR_ATOM:
23530 case PROCESSOR_GENERIC32:
23531 case PROCESSOR_GENERIC64:
23532 memory = get_attr_memory (insn);
      /* Model the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction,
	 when the previous instruction is not needed to compute the
	 address.  */
23537 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
23538 && !ix86_agi_dependent (dep_insn, insn))
23540 enum attr_unit unit = get_attr_unit (insn);
	  /* Because of the difference between the length of the integer and
	     floating point unit pipeline preparation stages, the memory
	     operands for floating point are cheaper.

	     ??? For Athlon the difference is most probably 2.  */
23548 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
23551 loadcost = TARGET_ATHLON ? 2 : 0;
23553 if (cost >= loadcost)
/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.  */
23571 ia32_multipass_dfa_lookahead (void)
23575 case PROCESSOR_PENTIUM:
23578 case PROCESSOR_PENTIUMPRO:
23582 case PROCESSOR_CORE2_32:
23583 case PROCESSOR_CORE2_64:
23584 case PROCESSOR_COREI7_32:
23585 case PROCESSOR_COREI7_64:
      /* Generally, we want haifa-sched:max_issue() to look ahead as far
	 as the number of instructions that can be executed in a cycle,
	 i.e., issue_rate.  I wonder why tuning for many CPUs does not
	 do this.  */
23589 return ix86_issue_rate ();
/* Model the decoder of Core 2/i7.
   The hooks below for multipass scheduling (see haifa-sched.c:max_issue)
   track the instruction fetch block boundaries and make sure that long
   (9+ byte) instructions are assigned to decoder D0.  */
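/* Illustrative scenario (added comment): with the 16-byte fetch window
   configured below, a 9-byte instruction can only be handled by the
   first decoder, and a 9-byte insn followed by two 4-byte insns (17
   bytes total) overflows the window, so the last insn has to wait for
   the next cycle.  */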
23603 /* Maximum length of an insn that can be handled by
23604 a secondary decoder unit. '8' for Core 2/i7. */
23605 static int core2i7_secondary_decoder_max_insn_size;
23607 /* Ifetch block size, i.e., number of bytes decoder reads per cycle.
23608 '16' for Core 2/i7. */
23609 static int core2i7_ifetch_block_size;
23611 /* Maximum number of instructions decoder can handle per cycle.
23612 '6' for Core 2/i7. */
23613 static int core2i7_ifetch_block_max_insns;
23615 typedef struct ix86_first_cycle_multipass_data_ *
23616 ix86_first_cycle_multipass_data_t;
23617 typedef const struct ix86_first_cycle_multipass_data_ *
23618 const_ix86_first_cycle_multipass_data_t;
/* A variable to store target state across calls to max_issue within
   one cycle.  */
23622 static struct ix86_first_cycle_multipass_data_ _ix86_first_cycle_multipass_data,
23623 *ix86_first_cycle_multipass_data = &_ix86_first_cycle_multipass_data;
23625 /* Initialize DATA. */
23627 core2i7_first_cycle_multipass_init (void *_data)
23629 ix86_first_cycle_multipass_data_t data
23630 = (ix86_first_cycle_multipass_data_t) _data;
23632 data->ifetch_block_len = 0;
23633 data->ifetch_block_n_insns = 0;
23634 data->ready_try_change = NULL;
23635 data->ready_try_change_size = 0;
23638 /* Advancing the cycle; reset ifetch block counts. */
23640 core2i7_dfa_post_advance_cycle (void)
23642 ix86_first_cycle_multipass_data_t data = ix86_first_cycle_multipass_data;
23644 gcc_assert (data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
23646 data->ifetch_block_len = 0;
23647 data->ifetch_block_n_insns = 0;
23650 static int min_insn_size (rtx);
/* Filter out insns from ready_try that the core will not be able to issue
   on the current cycle due to decoder restrictions.  */
23655 core2i7_first_cycle_multipass_filter_ready_try
23656 (const_ix86_first_cycle_multipass_data_t data,
23657 char *ready_try, int n_ready, bool first_cycle_insn_p)
23664 if (ready_try[n_ready])
23667 insn = get_ready_element (n_ready);
23668 insn_size = min_insn_size (insn);
      if (/* If this is too long an insn for a secondary decoder ... */
23671 (!first_cycle_insn_p
23672 && insn_size > core2i7_secondary_decoder_max_insn_size)
23673 /* ... or it would not fit into the ifetch block ... */
23674 || data->ifetch_block_len + insn_size > core2i7_ifetch_block_size
23675 /* ... or the decoder is full already ... */
23676 || data->ifetch_block_n_insns + 1 > core2i7_ifetch_block_max_insns)
23677 /* ... mask the insn out. */
23679 ready_try[n_ready] = 1;
23681 if (data->ready_try_change)
23682 SET_BIT (data->ready_try_change, n_ready);
23687 /* Prepare for a new round of multipass lookahead scheduling. */
23689 core2i7_first_cycle_multipass_begin (void *_data, char *ready_try, int n_ready,
23690 bool first_cycle_insn_p)
23692 ix86_first_cycle_multipass_data_t data
23693 = (ix86_first_cycle_multipass_data_t) _data;
23694 const_ix86_first_cycle_multipass_data_t prev_data
23695 = ix86_first_cycle_multipass_data;
23697 /* Restore the state from the end of the previous round. */
23698 data->ifetch_block_len = prev_data->ifetch_block_len;
23699 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns;
23701 /* Filter instructions that cannot be issued on current cycle due to
23702 decoder restrictions. */
23703 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
23704 first_cycle_insn_p);
/* INSN is being issued in the current solution.  Account for its impact on
   the decoder model.  */
23710 core2i7_first_cycle_multipass_issue (void *_data, char *ready_try, int n_ready,
23711 rtx insn, const void *_prev_data)
23713 ix86_first_cycle_multipass_data_t data
23714 = (ix86_first_cycle_multipass_data_t) _data;
23715 const_ix86_first_cycle_multipass_data_t prev_data
23716 = (const_ix86_first_cycle_multipass_data_t) _prev_data;
23718 int insn_size = min_insn_size (insn);
23720 data->ifetch_block_len = prev_data->ifetch_block_len + insn_size;
23721 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns + 1;
23722 gcc_assert (data->ifetch_block_len <= core2i7_ifetch_block_size
23723 && data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
23725 /* Allocate or resize the bitmap for storing INSN's effect on ready_try. */
23726 if (!data->ready_try_change)
23728 data->ready_try_change = sbitmap_alloc (n_ready);
23729 data->ready_try_change_size = n_ready;
23731 else if (data->ready_try_change_size < n_ready)
23733 data->ready_try_change = sbitmap_resize (data->ready_try_change,
23735 data->ready_try_change_size = n_ready;
23737 sbitmap_zero (data->ready_try_change);
  /* Filter out insns from ready_try that the core will not be able to issue
     on the current cycle due to decoder restrictions.  */
23741 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
23745 /* Revert the effect on ready_try. */
23747 core2i7_first_cycle_multipass_backtrack (const void *_data,
23749 int n_ready ATTRIBUTE_UNUSED)
23751 const_ix86_first_cycle_multipass_data_t data
23752 = (const_ix86_first_cycle_multipass_data_t) _data;
23753 unsigned int i = 0;
23754 sbitmap_iterator sbi;
23756 gcc_assert (sbitmap_last_set_bit (data->ready_try_change) < n_ready);
23757 EXECUTE_IF_SET_IN_SBITMAP (data->ready_try_change, 0, i, sbi)
23763 /* Save the result of multipass lookahead scheduling for the next round. */
23765 core2i7_first_cycle_multipass_end (const void *_data)
23767 const_ix86_first_cycle_multipass_data_t data
23768 = (const_ix86_first_cycle_multipass_data_t) _data;
23769 ix86_first_cycle_multipass_data_t next_data
23770 = ix86_first_cycle_multipass_data;
23774 next_data->ifetch_block_len = data->ifetch_block_len;
23775 next_data->ifetch_block_n_insns = data->ifetch_block_n_insns;
23779 /* Deallocate target data. */
23781 core2i7_first_cycle_multipass_fini (void *_data)
23783 ix86_first_cycle_multipass_data_t data
23784 = (ix86_first_cycle_multipass_data_t) _data;
23786 if (data->ready_try_change)
23788 sbitmap_free (data->ready_try_change);
23789 data->ready_try_change = NULL;
23790 data->ready_try_change_size = 0;
23794 /* Prepare for scheduling pass. */
23796 ix86_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
23797 int verbose ATTRIBUTE_UNUSED,
23798 int max_uid ATTRIBUTE_UNUSED)
23800 /* Install scheduling hooks for current CPU. Some of these hooks are used
23801 in time-critical parts of the scheduler, so we only set them up when
23802 they are actually used. */
23805 case PROCESSOR_CORE2_32:
23806 case PROCESSOR_CORE2_64:
23807 case PROCESSOR_COREI7_32:
23808 case PROCESSOR_COREI7_64:
23809 targetm.sched.dfa_post_advance_cycle
23810 = core2i7_dfa_post_advance_cycle;
23811 targetm.sched.first_cycle_multipass_init
23812 = core2i7_first_cycle_multipass_init;
23813 targetm.sched.first_cycle_multipass_begin
23814 = core2i7_first_cycle_multipass_begin;
23815 targetm.sched.first_cycle_multipass_issue
23816 = core2i7_first_cycle_multipass_issue;
23817 targetm.sched.first_cycle_multipass_backtrack
23818 = core2i7_first_cycle_multipass_backtrack;
23819 targetm.sched.first_cycle_multipass_end
23820 = core2i7_first_cycle_multipass_end;
23821 targetm.sched.first_cycle_multipass_fini
23822 = core2i7_first_cycle_multipass_fini;
23824 /* Set decoder parameters. */
23825 core2i7_secondary_decoder_max_insn_size = 8;
23826 core2i7_ifetch_block_size = 16;
23827 core2i7_ifetch_block_max_insns = 6;
23831 targetm.sched.dfa_post_advance_cycle = NULL;
23832 targetm.sched.first_cycle_multipass_init = NULL;
23833 targetm.sched.first_cycle_multipass_begin = NULL;
23834 targetm.sched.first_cycle_multipass_issue = NULL;
23835 targetm.sched.first_cycle_multipass_backtrack = NULL;
23836 targetm.sched.first_cycle_multipass_end = NULL;
23837 targetm.sched.first_cycle_multipass_fini = NULL;
/* Compute the alignment given to a constant that is being placed in memory.
   EXP is the constant and ALIGN is the alignment that the object would
   ordinarily have.
   The value of this function is used instead of that alignment to align
   the object.  */
23850 ix86_constant_alignment (tree exp, int align)
23852 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
23853 || TREE_CODE (exp) == INTEGER_CST)
23855 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
23857 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
23860 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
23861 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
23862 return BITS_PER_WORD;
23867 /* Compute the alignment for a static variable.
23868 TYPE is the data type, and ALIGN is the alignment that
23869 the object would ordinarily have. The value of this function is used
23870 instead of that alignment to align the object. */
23873 ix86_data_alignment (tree type, int align)
23875 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
23877 if (AGGREGATE_TYPE_P (type)
23878 && TYPE_SIZE (type)
23879 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23880 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
23881 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
23882 && align < max_align)
  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  */
23889 if (AGGREGATE_TYPE_P (type)
23890 && TYPE_SIZE (type)
23891 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
23892 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
23893 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
23897 if (TREE_CODE (type) == ARRAY_TYPE)
23899 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
23901 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
23904 else if (TREE_CODE (type) == COMPLEX_TYPE)
23907 if (TYPE_MODE (type) == DCmode && align < 64)
23909 if ((TYPE_MODE (type) == XCmode
23910 || TYPE_MODE (type) == TCmode) && align < 128)
23913 else if ((TREE_CODE (type) == RECORD_TYPE
23914 || TREE_CODE (type) == UNION_TYPE
23915 || TREE_CODE (type) == QUAL_UNION_TYPE)
23916 && TYPE_FIELDS (type))
23918 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
23920 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
23923 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
23924 || TREE_CODE (type) == INTEGER_TYPE)
23926 if (TYPE_MODE (type) == DFmode && align < 64)
23928 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
/* Compute the alignment for a local variable or a stack slot.  EXP is
   the data type or decl itself, MODE is the widest mode available and
   ALIGN is the alignment that the object would ordinarily have.  The
   value of this macro is used instead of that alignment to align the
   object.  */
23942 ix86_local_alignment (tree exp, enum machine_mode mode,
23943 unsigned int align)
23947 if (exp && DECL_P (exp))
23949 type = TREE_TYPE (exp);
23958 /* Don't do dynamic stack realignment for long long objects with
23959 -mpreferred-stack-boundary=2. */
  if (!TARGET_64BIT
      && align == 64
      && ix86_preferred_stack_boundary < 64
23963 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
23964 && (!type || !TYPE_USER_ALIGN (type))
23965 && (!decl || !DECL_USER_ALIGN (decl)))
      /* If TYPE is NULL, we are allocating a stack slot for a caller-save
	 register in MODE.  We will return the largest alignment of XF
	 and DF.  */
23973 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
23974 align = GET_MODE_ALIGNMENT (DFmode);
  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  The exact wording is:

       An array uses the same alignment as its elements, except that a local
       or global array variable of length at least 16 bytes or a C99
       variable-length array variable always has alignment of at least
       16 bytes.

     This was added to allow use of aligned SSE instructions on arrays.
     The rule is meant for static storage (where the compiler cannot do
     the analysis by itself).  We follow it for automatic variables only
     when convenient.  We fully control everything in the function being
     compiled, and functions from other units cannot rely on the alignment.

     Exclude the va_list type.  It is the common case of a local array
     where we cannot benefit from the alignment.  */
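  /* For example (added illustration): under the rule above, a local
     "char buf[32]" in a function optimized for speed on x86-64 is given
     128-bit alignment, so aligned SSE accesses such as movaps can be
     used on it.  */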
23993 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
23996 if (AGGREGATE_TYPE_P (type)
23997 && (va_list_type_node == NULL_TREE
23998 || (TYPE_MAIN_VARIANT (type)
23999 != TYPE_MAIN_VARIANT (va_list_type_node)))
24000 && TYPE_SIZE (type)
24001 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
24002 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
24003 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
24006 if (TREE_CODE (type) == ARRAY_TYPE)
24008 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
24010 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
24013 else if (TREE_CODE (type) == COMPLEX_TYPE)
24015 if (TYPE_MODE (type) == DCmode && align < 64)
24017 if ((TYPE_MODE (type) == XCmode
24018 || TYPE_MODE (type) == TCmode) && align < 128)
24021 else if ((TREE_CODE (type) == RECORD_TYPE
24022 || TREE_CODE (type) == UNION_TYPE
24023 || TREE_CODE (type) == QUAL_UNION_TYPE)
24024 && TYPE_FIELDS (type))
24026 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
24028 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
24031 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
24032 || TREE_CODE (type) == INTEGER_TYPE)
24035 if (TYPE_MODE (type) == DFmode && align < 64)
24037 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
24043 /* Compute the minimum required alignment for dynamic stack realignment
24044 purposes for a local variable, parameter or a stack slot. EXP is
24045 the data type or decl itself, MODE is its mode and ALIGN is the
24046 alignment that the object would ordinarily have. */
24049 ix86_minimum_alignment (tree exp, enum machine_mode mode,
24050 unsigned int align)
24054 if (exp && DECL_P (exp))
24056 type = TREE_TYPE (exp);
  if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
    return align;
24068 /* Don't do dynamic stack realignment for long long objects with
24069 -mpreferred-stack-boundary=2. */
24070 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
24071 && (!type || !TYPE_USER_ALIGN (type))
24072 && (!decl || !DECL_USER_ALIGN (decl)))
24078 /* Find a location for the static chain incoming to a nested function.
24079 This is a register, unless all free registers are used by arguments. */
24082 ix86_static_chain (const_tree fndecl, bool incoming_p)
  if (!DECL_STATIC_CHAIN (fndecl))
    return NULL;
24091 /* We always use R10 in 64-bit mode. */
24099 /* By default in 32-bit mode we use ECX to pass the static chain. */
24102 fntype = TREE_TYPE (fndecl);
24103 ccvt = ix86_get_callcvt (fntype);
24104 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) != 0)
24106 /* Fastcall functions use ecx/edx for arguments, which leaves
24107 us with EAX for the static chain.
24108 Thiscall functions use ecx for arguments, which also
24109 leaves us with EAX for the static chain. */
24112 else if (ix86_function_regparm (fntype, fndecl) == 3)
24114 /* For regparm 3, we have no free call-clobbered registers in
24115 which to store the static chain. In order to implement this,
24116 we have the trampoline push the static chain to the stack.
24117 However, we can't push a value below the return address when
24118 we call the nested function directly, so we have to use an
24119 alternate entry point. For this we use ESI, and have the
24120 alternate entry point push ESI, so that things appear the
24121 same once we're executing the nested function. */
24122 if (incoming_p)
24124 if (fndecl == current_function_decl)
24125 ix86_static_chain_on_stack = true;
24126 return gen_frame_mem (SImode,
24127 plus_constant (arg_pointer_rtx, -8));
24129 return gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, -4));
24133 return gen_rtx_REG (Pmode, regno);
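/* Editorial sketch, not part of the GCC sources: on ia32 the chain
   location follows the nested function's calling convention, e.g.

       void outer (void)
       {
         void inner (void) { }   // static chain arrives in ECX
         inner ();
       }

   A fastcall or thiscall nested function would receive the chain in
   EAX instead, and under regparm(3) it arrives on the stack through
   the alternate entry point that pushes ESI.  */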
24136 /* Emit RTL insns to initialize the variable parts of a trampoline.
24137 FNDECL is the decl of the target address; M_TRAMP is a MEM for
24138 the trampoline, and CHAIN_VALUE is an RTX for the static chain
24139 to be passed to the target function. */
24141 static void
24142 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
24144 rtx mem, fnaddr;
24145 int opcode;
24146 int offset = 0;
24148 fnaddr = XEXP (DECL_RTL (fndecl), 0);
24150 if (TARGET_64BIT)
24152 int size;
24154 /* Load the function address to r11. Try to load address using
24155 the shorter movl instead of movabs. We may want to support
24156 movq for kernel mode, but kernel does not use trampolines at
24157 the moment. */
24158 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
24160 fnaddr = copy_to_mode_reg (DImode, fnaddr);
24162 mem = adjust_address (m_tramp, HImode, offset);
24163 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
24165 mem = adjust_address (m_tramp, SImode, offset + 2);
24166 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
24167 offset += 6;
24169 else
24171 mem = adjust_address (m_tramp, HImode, offset);
24172 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
24174 mem = adjust_address (m_tramp, DImode, offset + 2);
24175 emit_move_insn (mem, fnaddr);
24176 offset += 10;
24179 /* Load static chain using movabs to r10. Use the
24180 shorter movl instead of movabs for x32. */
24181 if (TARGET_X32)
24183 opcode = 0xba41;
24184 size = 6;
24186 else
24188 opcode = 0xba49;
24189 size = 10;
24192 mem = adjust_address (m_tramp, HImode, offset);
24193 emit_move_insn (mem, gen_int_mode (opcode, HImode));
24195 mem = adjust_address (m_tramp, ptr_mode, offset + 2);
24196 emit_move_insn (mem, chain_value);
24197 offset += size;
24199 /* Jump to r11; the last (unused) byte is a nop, only there to
24200 pad the write out to a single 32-bit store. */
24201 mem = adjust_address (m_tramp, SImode, offset);
24202 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
24203 offset += 4;
24205 else
24207 rtx disp, chain;
24209 /* Depending on the static chain location, either load a register
24210 with a constant, or push the constant to the stack. All of the
24211 instructions are the same size. */
24212 chain = ix86_static_chain (fndecl, true);
24213 if (REG_P (chain))
24215 switch (REGNO (chain))
24217 case AX_REG:
24218 opcode = 0xb8; break;
24219 case CX_REG:
24220 opcode = 0xb9; break;
24221 default:
24222 gcc_unreachable ();
24225 else
24226 opcode = 0x68;
24228 mem = adjust_address (m_tramp, QImode, offset);
24229 emit_move_insn (mem, gen_int_mode (opcode, QImode));
24231 mem = adjust_address (m_tramp, SImode, offset + 1);
24232 emit_move_insn (mem, chain_value);
24233 offset += 5;
24235 mem = adjust_address (m_tramp, QImode, offset);
24236 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
24238 mem = adjust_address (m_tramp, SImode, offset + 1);
24240 /* Compute offset from the end of the jmp to the target function.
24241 In the case in which the trampoline stores the static chain on
24242 the stack, we need to skip the first insn which pushes the
24243 (call-saved) register static chain; this push is 1 byte. */
24244 offset += 5;
24245 disp = expand_binop (SImode, sub_optab, fnaddr,
24246 plus_constant (XEXP (m_tramp, 0),
24247 offset - (MEM_P (chain) ? 1 : 0)),
24248 NULL_RTX, 1, OPTAB_DIRECT);
24249 emit_move_insn (mem, disp);
24252 gcc_assert (offset <= TRAMPOLINE_SIZE);
24254 #ifdef HAVE_ENABLE_EXECUTE_STACK
24255 #ifdef CHECK_EXECUTE_STACK_ENABLED
24256 if (CHECK_EXECUTE_STACK_ENABLED)
24257 #endif
24258 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
24259 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
24260 #endif
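/* Editorial sketch, not part of the GCC sources: the common 64-bit
   trampoline assembled above is, byte for byte,

       49 bb <8-byte fnaddr>   // movabs $fnaddr, %r11
       49 ba <8-byte chain>    // movabs $chain,  %r10
       49 ff e3                // rex.WB jmp *%r11
       90                      // nop, pads the final 32-bit store

   with the 41 bb / 41 ba movl forms used instead when the value fits
   a zero-extended 32-bit immediate (and for the x32 static chain).  */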
24263 /* The following file contains several enumerations and data structures
24264 built from the definitions in i386-builtin-types.def. */
24266 #include "i386-builtin-types.inc"
24268 /* Table for the ix86 builtin non-function types. */
24269 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
24271 /* Retrieve an element from the above table, building some of
24272 the types lazily. */
24275 ix86_get_builtin_type (enum ix86_builtin_type tcode)
24277 unsigned int index;
24278 tree type, itype;
24280 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
24282 type = ix86_builtin_type_tab[(int) tcode];
24283 if (type != NULL)
24284 return type;
24286 gcc_assert (tcode > IX86_BT_LAST_PRIM);
24287 if (tcode <= IX86_BT_LAST_VECT)
24289 enum machine_mode mode;
24291 index = tcode - IX86_BT_LAST_PRIM - 1;
24292 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
24293 mode = ix86_builtin_type_vect_mode[index];
24295 type = build_vector_type_for_mode (itype, mode);
24297 else
24299 int quals;
24301 index = tcode - IX86_BT_LAST_VECT - 1;
24302 if (tcode <= IX86_BT_LAST_PTR)
24303 quals = TYPE_UNQUALIFIED;
24304 else
24305 quals = TYPE_QUAL_CONST;
24307 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
24308 if (quals != TYPE_UNQUALIFIED)
24309 itype = build_qualified_type (itype, quals);
24311 type = build_pointer_type (itype);
24314 ix86_builtin_type_tab[(int) tcode] = type;
24315 return type;
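/* Editorial example, not part of the GCC sources: the first call of
   ix86_get_builtin_type (IX86_BT_V4SF) builds roughly

       tree t = build_vector_type_for_mode (float_type_node, V4SFmode);

   and memoizes it in ix86_builtin_type_tab, so later lookups are just
   a table read.  */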
24318 /* Table for the ix86 builtin function types. */
24319 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
24321 /* Retrieve an element from the above table, building some of
24322 the types lazily. */
24325 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
24327 tree type;
24329 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
24331 type = ix86_builtin_func_type_tab[(int) tcode];
24332 if (type != NULL)
24333 return type;
24335 if (tcode <= IX86_BT_LAST_FUNC)
24337 unsigned start = ix86_builtin_func_start[(int) tcode];
24338 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
24339 tree rtype, atype, args = void_list_node;
24340 unsigned i;
24342 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
24343 for (i = after - 1; i > start; --i)
24345 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
24346 args = tree_cons (NULL, atype, args);
24349 type = build_function_type (rtype, args);
24351 else
24353 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
24354 enum ix86_builtin_func_type icode;
24356 icode = ix86_builtin_func_alias_base[index];
24357 type = ix86_get_builtin_func_type (icode);
24360 ix86_builtin_func_type_tab[(int) tcode] = type;
24361 return type;
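/* Editorial example, not part of the GCC sources: for a non-alias
   code such as V4SF_FTYPE_V4SF_V4SF the loop above conses the argument
   list back to front, the equivalent of

       tree v4sf = ix86_get_builtin_type (IX86_BT_V4SF);
       tree args = tree_cons (NULL, v4sf,
                              tree_cons (NULL, v4sf, void_list_node));
       tree fn = build_function_type (v4sf, args);

   while codes past IX86_BT_LAST_FUNC simply reuse their alias base.  */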
24365 /* Codes for all the SSE/MMX builtins. */
24366 enum ix86_builtins
24368 IX86_BUILTIN_ADDPS,
24369 IX86_BUILTIN_ADDSS,
24370 IX86_BUILTIN_DIVPS,
24371 IX86_BUILTIN_DIVSS,
24372 IX86_BUILTIN_MULPS,
24373 IX86_BUILTIN_MULSS,
24374 IX86_BUILTIN_SUBPS,
24375 IX86_BUILTIN_SUBSS,
24377 IX86_BUILTIN_CMPEQPS,
24378 IX86_BUILTIN_CMPLTPS,
24379 IX86_BUILTIN_CMPLEPS,
24380 IX86_BUILTIN_CMPGTPS,
24381 IX86_BUILTIN_CMPGEPS,
24382 IX86_BUILTIN_CMPNEQPS,
24383 IX86_BUILTIN_CMPNLTPS,
24384 IX86_BUILTIN_CMPNLEPS,
24385 IX86_BUILTIN_CMPNGTPS,
24386 IX86_BUILTIN_CMPNGEPS,
24387 IX86_BUILTIN_CMPORDPS,
24388 IX86_BUILTIN_CMPUNORDPS,
24389 IX86_BUILTIN_CMPEQSS,
24390 IX86_BUILTIN_CMPLTSS,
24391 IX86_BUILTIN_CMPLESS,
24392 IX86_BUILTIN_CMPNEQSS,
24393 IX86_BUILTIN_CMPNLTSS,
24394 IX86_BUILTIN_CMPNLESS,
24395 IX86_BUILTIN_CMPNGTSS,
24396 IX86_BUILTIN_CMPNGESS,
24397 IX86_BUILTIN_CMPORDSS,
24398 IX86_BUILTIN_CMPUNORDSS,
24400 IX86_BUILTIN_COMIEQSS,
24401 IX86_BUILTIN_COMILTSS,
24402 IX86_BUILTIN_COMILESS,
24403 IX86_BUILTIN_COMIGTSS,
24404 IX86_BUILTIN_COMIGESS,
24405 IX86_BUILTIN_COMINEQSS,
24406 IX86_BUILTIN_UCOMIEQSS,
24407 IX86_BUILTIN_UCOMILTSS,
24408 IX86_BUILTIN_UCOMILESS,
24409 IX86_BUILTIN_UCOMIGTSS,
24410 IX86_BUILTIN_UCOMIGESS,
24411 IX86_BUILTIN_UCOMINEQSS,
24413 IX86_BUILTIN_CVTPI2PS,
24414 IX86_BUILTIN_CVTPS2PI,
24415 IX86_BUILTIN_CVTSI2SS,
24416 IX86_BUILTIN_CVTSI642SS,
24417 IX86_BUILTIN_CVTSS2SI,
24418 IX86_BUILTIN_CVTSS2SI64,
24419 IX86_BUILTIN_CVTTPS2PI,
24420 IX86_BUILTIN_CVTTSS2SI,
24421 IX86_BUILTIN_CVTTSS2SI64,
24423 IX86_BUILTIN_MAXPS,
24424 IX86_BUILTIN_MAXSS,
24425 IX86_BUILTIN_MINPS,
24426 IX86_BUILTIN_MINSS,
24428 IX86_BUILTIN_LOADUPS,
24429 IX86_BUILTIN_STOREUPS,
24430 IX86_BUILTIN_MOVSS,
24432 IX86_BUILTIN_MOVHLPS,
24433 IX86_BUILTIN_MOVLHPS,
24434 IX86_BUILTIN_LOADHPS,
24435 IX86_BUILTIN_LOADLPS,
24436 IX86_BUILTIN_STOREHPS,
24437 IX86_BUILTIN_STORELPS,
24439 IX86_BUILTIN_MASKMOVQ,
24440 IX86_BUILTIN_MOVMSKPS,
24441 IX86_BUILTIN_PMOVMSKB,
24443 IX86_BUILTIN_MOVNTPS,
24444 IX86_BUILTIN_MOVNTQ,
24446 IX86_BUILTIN_LOADDQU,
24447 IX86_BUILTIN_STOREDQU,
24449 IX86_BUILTIN_PACKSSWB,
24450 IX86_BUILTIN_PACKSSDW,
24451 IX86_BUILTIN_PACKUSWB,
24453 IX86_BUILTIN_PADDB,
24454 IX86_BUILTIN_PADDW,
24455 IX86_BUILTIN_PADDD,
24456 IX86_BUILTIN_PADDQ,
24457 IX86_BUILTIN_PADDSB,
24458 IX86_BUILTIN_PADDSW,
24459 IX86_BUILTIN_PADDUSB,
24460 IX86_BUILTIN_PADDUSW,
24461 IX86_BUILTIN_PSUBB,
24462 IX86_BUILTIN_PSUBW,
24463 IX86_BUILTIN_PSUBD,
24464 IX86_BUILTIN_PSUBQ,
24465 IX86_BUILTIN_PSUBSB,
24466 IX86_BUILTIN_PSUBSW,
24467 IX86_BUILTIN_PSUBUSB,
24468 IX86_BUILTIN_PSUBUSW,
24471 IX86_BUILTIN_PANDN,
24475 IX86_BUILTIN_PAVGB,
24476 IX86_BUILTIN_PAVGW,
24478 IX86_BUILTIN_PCMPEQB,
24479 IX86_BUILTIN_PCMPEQW,
24480 IX86_BUILTIN_PCMPEQD,
24481 IX86_BUILTIN_PCMPGTB,
24482 IX86_BUILTIN_PCMPGTW,
24483 IX86_BUILTIN_PCMPGTD,
24485 IX86_BUILTIN_PMADDWD,
24487 IX86_BUILTIN_PMAXSW,
24488 IX86_BUILTIN_PMAXUB,
24489 IX86_BUILTIN_PMINSW,
24490 IX86_BUILTIN_PMINUB,
24492 IX86_BUILTIN_PMULHUW,
24493 IX86_BUILTIN_PMULHW,
24494 IX86_BUILTIN_PMULLW,
24496 IX86_BUILTIN_PSADBW,
24497 IX86_BUILTIN_PSHUFW,
24499 IX86_BUILTIN_PSLLW,
24500 IX86_BUILTIN_PSLLD,
24501 IX86_BUILTIN_PSLLQ,
24502 IX86_BUILTIN_PSRAW,
24503 IX86_BUILTIN_PSRAD,
24504 IX86_BUILTIN_PSRLW,
24505 IX86_BUILTIN_PSRLD,
24506 IX86_BUILTIN_PSRLQ,
24507 IX86_BUILTIN_PSLLWI,
24508 IX86_BUILTIN_PSLLDI,
24509 IX86_BUILTIN_PSLLQI,
24510 IX86_BUILTIN_PSRAWI,
24511 IX86_BUILTIN_PSRADI,
24512 IX86_BUILTIN_PSRLWI,
24513 IX86_BUILTIN_PSRLDI,
24514 IX86_BUILTIN_PSRLQI,
24516 IX86_BUILTIN_PUNPCKHBW,
24517 IX86_BUILTIN_PUNPCKHWD,
24518 IX86_BUILTIN_PUNPCKHDQ,
24519 IX86_BUILTIN_PUNPCKLBW,
24520 IX86_BUILTIN_PUNPCKLWD,
24521 IX86_BUILTIN_PUNPCKLDQ,
24523 IX86_BUILTIN_SHUFPS,
24525 IX86_BUILTIN_RCPPS,
24526 IX86_BUILTIN_RCPSS,
24527 IX86_BUILTIN_RSQRTPS,
24528 IX86_BUILTIN_RSQRTPS_NR,
24529 IX86_BUILTIN_RSQRTSS,
24530 IX86_BUILTIN_RSQRTF,
24531 IX86_BUILTIN_SQRTPS,
24532 IX86_BUILTIN_SQRTPS_NR,
24533 IX86_BUILTIN_SQRTSS,
24535 IX86_BUILTIN_UNPCKHPS,
24536 IX86_BUILTIN_UNPCKLPS,
24538 IX86_BUILTIN_ANDPS,
24539 IX86_BUILTIN_ANDNPS,
24541 IX86_BUILTIN_XORPS,
24544 IX86_BUILTIN_LDMXCSR,
24545 IX86_BUILTIN_STMXCSR,
24546 IX86_BUILTIN_SFENCE,
24548 /* 3DNow! Original */
24549 IX86_BUILTIN_FEMMS,
24550 IX86_BUILTIN_PAVGUSB,
24551 IX86_BUILTIN_PF2ID,
24552 IX86_BUILTIN_PFACC,
24553 IX86_BUILTIN_PFADD,
24554 IX86_BUILTIN_PFCMPEQ,
24555 IX86_BUILTIN_PFCMPGE,
24556 IX86_BUILTIN_PFCMPGT,
24557 IX86_BUILTIN_PFMAX,
24558 IX86_BUILTIN_PFMIN,
24559 IX86_BUILTIN_PFMUL,
24560 IX86_BUILTIN_PFRCP,
24561 IX86_BUILTIN_PFRCPIT1,
24562 IX86_BUILTIN_PFRCPIT2,
24563 IX86_BUILTIN_PFRSQIT1,
24564 IX86_BUILTIN_PFRSQRT,
24565 IX86_BUILTIN_PFSUB,
24566 IX86_BUILTIN_PFSUBR,
24567 IX86_BUILTIN_PI2FD,
24568 IX86_BUILTIN_PMULHRW,
24570 /* 3DNow! Athlon Extensions */
24571 IX86_BUILTIN_PF2IW,
24572 IX86_BUILTIN_PFNACC,
24573 IX86_BUILTIN_PFPNACC,
24574 IX86_BUILTIN_PI2FW,
24575 IX86_BUILTIN_PSWAPDSI,
24576 IX86_BUILTIN_PSWAPDSF,
24578 /* SSE2 */
24579 IX86_BUILTIN_ADDPD,
24580 IX86_BUILTIN_ADDSD,
24581 IX86_BUILTIN_DIVPD,
24582 IX86_BUILTIN_DIVSD,
24583 IX86_BUILTIN_MULPD,
24584 IX86_BUILTIN_MULSD,
24585 IX86_BUILTIN_SUBPD,
24586 IX86_BUILTIN_SUBSD,
24588 IX86_BUILTIN_CMPEQPD,
24589 IX86_BUILTIN_CMPLTPD,
24590 IX86_BUILTIN_CMPLEPD,
24591 IX86_BUILTIN_CMPGTPD,
24592 IX86_BUILTIN_CMPGEPD,
24593 IX86_BUILTIN_CMPNEQPD,
24594 IX86_BUILTIN_CMPNLTPD,
24595 IX86_BUILTIN_CMPNLEPD,
24596 IX86_BUILTIN_CMPNGTPD,
24597 IX86_BUILTIN_CMPNGEPD,
24598 IX86_BUILTIN_CMPORDPD,
24599 IX86_BUILTIN_CMPUNORDPD,
24600 IX86_BUILTIN_CMPEQSD,
24601 IX86_BUILTIN_CMPLTSD,
24602 IX86_BUILTIN_CMPLESD,
24603 IX86_BUILTIN_CMPNEQSD,
24604 IX86_BUILTIN_CMPNLTSD,
24605 IX86_BUILTIN_CMPNLESD,
24606 IX86_BUILTIN_CMPORDSD,
24607 IX86_BUILTIN_CMPUNORDSD,
24609 IX86_BUILTIN_COMIEQSD,
24610 IX86_BUILTIN_COMILTSD,
24611 IX86_BUILTIN_COMILESD,
24612 IX86_BUILTIN_COMIGTSD,
24613 IX86_BUILTIN_COMIGESD,
24614 IX86_BUILTIN_COMINEQSD,
24615 IX86_BUILTIN_UCOMIEQSD,
24616 IX86_BUILTIN_UCOMILTSD,
24617 IX86_BUILTIN_UCOMILESD,
24618 IX86_BUILTIN_UCOMIGTSD,
24619 IX86_BUILTIN_UCOMIGESD,
24620 IX86_BUILTIN_UCOMINEQSD,
24622 IX86_BUILTIN_MAXPD,
24623 IX86_BUILTIN_MAXSD,
24624 IX86_BUILTIN_MINPD,
24625 IX86_BUILTIN_MINSD,
24627 IX86_BUILTIN_ANDPD,
24628 IX86_BUILTIN_ANDNPD,
24630 IX86_BUILTIN_XORPD,
24632 IX86_BUILTIN_SQRTPD,
24633 IX86_BUILTIN_SQRTSD,
24635 IX86_BUILTIN_UNPCKHPD,
24636 IX86_BUILTIN_UNPCKLPD,
24638 IX86_BUILTIN_SHUFPD,
24640 IX86_BUILTIN_LOADUPD,
24641 IX86_BUILTIN_STOREUPD,
24642 IX86_BUILTIN_MOVSD,
24644 IX86_BUILTIN_LOADHPD,
24645 IX86_BUILTIN_LOADLPD,
24647 IX86_BUILTIN_CVTDQ2PD,
24648 IX86_BUILTIN_CVTDQ2PS,
24650 IX86_BUILTIN_CVTPD2DQ,
24651 IX86_BUILTIN_CVTPD2PI,
24652 IX86_BUILTIN_CVTPD2PS,
24653 IX86_BUILTIN_CVTTPD2DQ,
24654 IX86_BUILTIN_CVTTPD2PI,
24656 IX86_BUILTIN_CVTPI2PD,
24657 IX86_BUILTIN_CVTSI2SD,
24658 IX86_BUILTIN_CVTSI642SD,
24660 IX86_BUILTIN_CVTSD2SI,
24661 IX86_BUILTIN_CVTSD2SI64,
24662 IX86_BUILTIN_CVTSD2SS,
24663 IX86_BUILTIN_CVTSS2SD,
24664 IX86_BUILTIN_CVTTSD2SI,
24665 IX86_BUILTIN_CVTTSD2SI64,
24667 IX86_BUILTIN_CVTPS2DQ,
24668 IX86_BUILTIN_CVTPS2PD,
24669 IX86_BUILTIN_CVTTPS2DQ,
24671 IX86_BUILTIN_MOVNTI,
24672 IX86_BUILTIN_MOVNTPD,
24673 IX86_BUILTIN_MOVNTDQ,
24675 IX86_BUILTIN_MOVQ128,
24678 IX86_BUILTIN_MASKMOVDQU,
24679 IX86_BUILTIN_MOVMSKPD,
24680 IX86_BUILTIN_PMOVMSKB128,
24682 IX86_BUILTIN_PACKSSWB128,
24683 IX86_BUILTIN_PACKSSDW128,
24684 IX86_BUILTIN_PACKUSWB128,
24686 IX86_BUILTIN_PADDB128,
24687 IX86_BUILTIN_PADDW128,
24688 IX86_BUILTIN_PADDD128,
24689 IX86_BUILTIN_PADDQ128,
24690 IX86_BUILTIN_PADDSB128,
24691 IX86_BUILTIN_PADDSW128,
24692 IX86_BUILTIN_PADDUSB128,
24693 IX86_BUILTIN_PADDUSW128,
24694 IX86_BUILTIN_PSUBB128,
24695 IX86_BUILTIN_PSUBW128,
24696 IX86_BUILTIN_PSUBD128,
24697 IX86_BUILTIN_PSUBQ128,
24698 IX86_BUILTIN_PSUBSB128,
24699 IX86_BUILTIN_PSUBSW128,
24700 IX86_BUILTIN_PSUBUSB128,
24701 IX86_BUILTIN_PSUBUSW128,
24703 IX86_BUILTIN_PAND128,
24704 IX86_BUILTIN_PANDN128,
24705 IX86_BUILTIN_POR128,
24706 IX86_BUILTIN_PXOR128,
24708 IX86_BUILTIN_PAVGB128,
24709 IX86_BUILTIN_PAVGW128,
24711 IX86_BUILTIN_PCMPEQB128,
24712 IX86_BUILTIN_PCMPEQW128,
24713 IX86_BUILTIN_PCMPEQD128,
24714 IX86_BUILTIN_PCMPGTB128,
24715 IX86_BUILTIN_PCMPGTW128,
24716 IX86_BUILTIN_PCMPGTD128,
24718 IX86_BUILTIN_PMADDWD128,
24720 IX86_BUILTIN_PMAXSW128,
24721 IX86_BUILTIN_PMAXUB128,
24722 IX86_BUILTIN_PMINSW128,
24723 IX86_BUILTIN_PMINUB128,
24725 IX86_BUILTIN_PMULUDQ,
24726 IX86_BUILTIN_PMULUDQ128,
24727 IX86_BUILTIN_PMULHUW128,
24728 IX86_BUILTIN_PMULHW128,
24729 IX86_BUILTIN_PMULLW128,
24731 IX86_BUILTIN_PSADBW128,
24732 IX86_BUILTIN_PSHUFHW,
24733 IX86_BUILTIN_PSHUFLW,
24734 IX86_BUILTIN_PSHUFD,
24736 IX86_BUILTIN_PSLLDQI128,
24737 IX86_BUILTIN_PSLLWI128,
24738 IX86_BUILTIN_PSLLDI128,
24739 IX86_BUILTIN_PSLLQI128,
24740 IX86_BUILTIN_PSRAWI128,
24741 IX86_BUILTIN_PSRADI128,
24742 IX86_BUILTIN_PSRLDQI128,
24743 IX86_BUILTIN_PSRLWI128,
24744 IX86_BUILTIN_PSRLDI128,
24745 IX86_BUILTIN_PSRLQI128,
24747 IX86_BUILTIN_PSLLDQ128,
24748 IX86_BUILTIN_PSLLW128,
24749 IX86_BUILTIN_PSLLD128,
24750 IX86_BUILTIN_PSLLQ128,
24751 IX86_BUILTIN_PSRAW128,
24752 IX86_BUILTIN_PSRAD128,
24753 IX86_BUILTIN_PSRLW128,
24754 IX86_BUILTIN_PSRLD128,
24755 IX86_BUILTIN_PSRLQ128,
24757 IX86_BUILTIN_PUNPCKHBW128,
24758 IX86_BUILTIN_PUNPCKHWD128,
24759 IX86_BUILTIN_PUNPCKHDQ128,
24760 IX86_BUILTIN_PUNPCKHQDQ128,
24761 IX86_BUILTIN_PUNPCKLBW128,
24762 IX86_BUILTIN_PUNPCKLWD128,
24763 IX86_BUILTIN_PUNPCKLDQ128,
24764 IX86_BUILTIN_PUNPCKLQDQ128,
24766 IX86_BUILTIN_CLFLUSH,
24767 IX86_BUILTIN_MFENCE,
24768 IX86_BUILTIN_LFENCE,
24769 IX86_BUILTIN_PAUSE,
24771 IX86_BUILTIN_BSRSI,
24772 IX86_BUILTIN_BSRDI,
24773 IX86_BUILTIN_RDPMC,
24774 IX86_BUILTIN_RDTSC,
24775 IX86_BUILTIN_RDTSCP,
24776 IX86_BUILTIN_ROLQI,
24777 IX86_BUILTIN_ROLHI,
24778 IX86_BUILTIN_RORQI,
24779 IX86_BUILTIN_RORHI,
24781 /* SSE3 */
24782 IX86_BUILTIN_ADDSUBPS,
24783 IX86_BUILTIN_HADDPS,
24784 IX86_BUILTIN_HSUBPS,
24785 IX86_BUILTIN_MOVSHDUP,
24786 IX86_BUILTIN_MOVSLDUP,
24787 IX86_BUILTIN_ADDSUBPD,
24788 IX86_BUILTIN_HADDPD,
24789 IX86_BUILTIN_HSUBPD,
24790 IX86_BUILTIN_LDDQU,
24792 IX86_BUILTIN_MONITOR,
24793 IX86_BUILTIN_MWAIT,
24795 /* SSSE3 */
24796 IX86_BUILTIN_PHADDW,
24797 IX86_BUILTIN_PHADDD,
24798 IX86_BUILTIN_PHADDSW,
24799 IX86_BUILTIN_PHSUBW,
24800 IX86_BUILTIN_PHSUBD,
24801 IX86_BUILTIN_PHSUBSW,
24802 IX86_BUILTIN_PMADDUBSW,
24803 IX86_BUILTIN_PMULHRSW,
24804 IX86_BUILTIN_PSHUFB,
24805 IX86_BUILTIN_PSIGNB,
24806 IX86_BUILTIN_PSIGNW,
24807 IX86_BUILTIN_PSIGND,
24808 IX86_BUILTIN_PALIGNR,
24809 IX86_BUILTIN_PABSB,
24810 IX86_BUILTIN_PABSW,
24811 IX86_BUILTIN_PABSD,
24813 IX86_BUILTIN_PHADDW128,
24814 IX86_BUILTIN_PHADDD128,
24815 IX86_BUILTIN_PHADDSW128,
24816 IX86_BUILTIN_PHSUBW128,
24817 IX86_BUILTIN_PHSUBD128,
24818 IX86_BUILTIN_PHSUBSW128,
24819 IX86_BUILTIN_PMADDUBSW128,
24820 IX86_BUILTIN_PMULHRSW128,
24821 IX86_BUILTIN_PSHUFB128,
24822 IX86_BUILTIN_PSIGNB128,
24823 IX86_BUILTIN_PSIGNW128,
24824 IX86_BUILTIN_PSIGND128,
24825 IX86_BUILTIN_PALIGNR128,
24826 IX86_BUILTIN_PABSB128,
24827 IX86_BUILTIN_PABSW128,
24828 IX86_BUILTIN_PABSD128,
24830 /* AMDFAM10 - SSE4A New Instructions. */
24831 IX86_BUILTIN_MOVNTSD,
24832 IX86_BUILTIN_MOVNTSS,
24833 IX86_BUILTIN_EXTRQI,
24834 IX86_BUILTIN_EXTRQ,
24835 IX86_BUILTIN_INSERTQI,
24836 IX86_BUILTIN_INSERTQ,
24838 /* SSE4.1 */
24839 IX86_BUILTIN_BLENDPD,
24840 IX86_BUILTIN_BLENDPS,
24841 IX86_BUILTIN_BLENDVPD,
24842 IX86_BUILTIN_BLENDVPS,
24843 IX86_BUILTIN_PBLENDVB128,
24844 IX86_BUILTIN_PBLENDW128,
24849 IX86_BUILTIN_INSERTPS128,
24851 IX86_BUILTIN_MOVNTDQA,
24852 IX86_BUILTIN_MPSADBW128,
24853 IX86_BUILTIN_PACKUSDW128,
24854 IX86_BUILTIN_PCMPEQQ,
24855 IX86_BUILTIN_PHMINPOSUW128,
24857 IX86_BUILTIN_PMAXSB128,
24858 IX86_BUILTIN_PMAXSD128,
24859 IX86_BUILTIN_PMAXUD128,
24860 IX86_BUILTIN_PMAXUW128,
24862 IX86_BUILTIN_PMINSB128,
24863 IX86_BUILTIN_PMINSD128,
24864 IX86_BUILTIN_PMINUD128,
24865 IX86_BUILTIN_PMINUW128,
24867 IX86_BUILTIN_PMOVSXBW128,
24868 IX86_BUILTIN_PMOVSXBD128,
24869 IX86_BUILTIN_PMOVSXBQ128,
24870 IX86_BUILTIN_PMOVSXWD128,
24871 IX86_BUILTIN_PMOVSXWQ128,
24872 IX86_BUILTIN_PMOVSXDQ128,
24874 IX86_BUILTIN_PMOVZXBW128,
24875 IX86_BUILTIN_PMOVZXBD128,
24876 IX86_BUILTIN_PMOVZXBQ128,
24877 IX86_BUILTIN_PMOVZXWD128,
24878 IX86_BUILTIN_PMOVZXWQ128,
24879 IX86_BUILTIN_PMOVZXDQ128,
24881 IX86_BUILTIN_PMULDQ128,
24882 IX86_BUILTIN_PMULLD128,
24884 IX86_BUILTIN_ROUNDPD,
24885 IX86_BUILTIN_ROUNDPS,
24886 IX86_BUILTIN_ROUNDSD,
24887 IX86_BUILTIN_ROUNDSS,
24889 IX86_BUILTIN_FLOORPD,
24890 IX86_BUILTIN_CEILPD,
24891 IX86_BUILTIN_TRUNCPD,
24892 IX86_BUILTIN_RINTPD,
24893 IX86_BUILTIN_ROUNDPD_AZ,
24894 IX86_BUILTIN_FLOORPS,
24895 IX86_BUILTIN_CEILPS,
24896 IX86_BUILTIN_TRUNCPS,
24897 IX86_BUILTIN_RINTPS,
24898 IX86_BUILTIN_ROUNDPS_AZ,
24900 IX86_BUILTIN_PTESTZ,
24901 IX86_BUILTIN_PTESTC,
24902 IX86_BUILTIN_PTESTNZC,
24904 IX86_BUILTIN_VEC_INIT_V2SI,
24905 IX86_BUILTIN_VEC_INIT_V4HI,
24906 IX86_BUILTIN_VEC_INIT_V8QI,
24907 IX86_BUILTIN_VEC_EXT_V2DF,
24908 IX86_BUILTIN_VEC_EXT_V2DI,
24909 IX86_BUILTIN_VEC_EXT_V4SF,
24910 IX86_BUILTIN_VEC_EXT_V4SI,
24911 IX86_BUILTIN_VEC_EXT_V8HI,
24912 IX86_BUILTIN_VEC_EXT_V2SI,
24913 IX86_BUILTIN_VEC_EXT_V4HI,
24914 IX86_BUILTIN_VEC_EXT_V16QI,
24915 IX86_BUILTIN_VEC_SET_V2DI,
24916 IX86_BUILTIN_VEC_SET_V4SF,
24917 IX86_BUILTIN_VEC_SET_V4SI,
24918 IX86_BUILTIN_VEC_SET_V8HI,
24919 IX86_BUILTIN_VEC_SET_V4HI,
24920 IX86_BUILTIN_VEC_SET_V16QI,
24922 IX86_BUILTIN_VEC_PACK_SFIX,
24923 IX86_BUILTIN_VEC_PACK_SFIX256,
24925 /* SSE4.2 */
24926 IX86_BUILTIN_CRC32QI,
24927 IX86_BUILTIN_CRC32HI,
24928 IX86_BUILTIN_CRC32SI,
24929 IX86_BUILTIN_CRC32DI,
24931 IX86_BUILTIN_PCMPESTRI128,
24932 IX86_BUILTIN_PCMPESTRM128,
24933 IX86_BUILTIN_PCMPESTRA128,
24934 IX86_BUILTIN_PCMPESTRC128,
24935 IX86_BUILTIN_PCMPESTRO128,
24936 IX86_BUILTIN_PCMPESTRS128,
24937 IX86_BUILTIN_PCMPESTRZ128,
24938 IX86_BUILTIN_PCMPISTRI128,
24939 IX86_BUILTIN_PCMPISTRM128,
24940 IX86_BUILTIN_PCMPISTRA128,
24941 IX86_BUILTIN_PCMPISTRC128,
24942 IX86_BUILTIN_PCMPISTRO128,
24943 IX86_BUILTIN_PCMPISTRS128,
24944 IX86_BUILTIN_PCMPISTRZ128,
24946 IX86_BUILTIN_PCMPGTQ,
24948 /* AES instructions */
24949 IX86_BUILTIN_AESENC128,
24950 IX86_BUILTIN_AESENCLAST128,
24951 IX86_BUILTIN_AESDEC128,
24952 IX86_BUILTIN_AESDECLAST128,
24953 IX86_BUILTIN_AESIMC128,
24954 IX86_BUILTIN_AESKEYGENASSIST128,
24956 /* PCLMUL instruction */
24957 IX86_BUILTIN_PCLMULQDQ128,
24959 /* AVX */
24960 IX86_BUILTIN_ADDPD256,
24961 IX86_BUILTIN_ADDPS256,
24962 IX86_BUILTIN_ADDSUBPD256,
24963 IX86_BUILTIN_ADDSUBPS256,
24964 IX86_BUILTIN_ANDPD256,
24965 IX86_BUILTIN_ANDPS256,
24966 IX86_BUILTIN_ANDNPD256,
24967 IX86_BUILTIN_ANDNPS256,
24968 IX86_BUILTIN_BLENDPD256,
24969 IX86_BUILTIN_BLENDPS256,
24970 IX86_BUILTIN_BLENDVPD256,
24971 IX86_BUILTIN_BLENDVPS256,
24972 IX86_BUILTIN_DIVPD256,
24973 IX86_BUILTIN_DIVPS256,
24974 IX86_BUILTIN_DPPS256,
24975 IX86_BUILTIN_HADDPD256,
24976 IX86_BUILTIN_HADDPS256,
24977 IX86_BUILTIN_HSUBPD256,
24978 IX86_BUILTIN_HSUBPS256,
24979 IX86_BUILTIN_MAXPD256,
24980 IX86_BUILTIN_MAXPS256,
24981 IX86_BUILTIN_MINPD256,
24982 IX86_BUILTIN_MINPS256,
24983 IX86_BUILTIN_MULPD256,
24984 IX86_BUILTIN_MULPS256,
24985 IX86_BUILTIN_ORPD256,
24986 IX86_BUILTIN_ORPS256,
24987 IX86_BUILTIN_SHUFPD256,
24988 IX86_BUILTIN_SHUFPS256,
24989 IX86_BUILTIN_SUBPD256,
24990 IX86_BUILTIN_SUBPS256,
24991 IX86_BUILTIN_XORPD256,
24992 IX86_BUILTIN_XORPS256,
24993 IX86_BUILTIN_CMPSD,
24994 IX86_BUILTIN_CMPSS,
24995 IX86_BUILTIN_CMPPD,
24996 IX86_BUILTIN_CMPPS,
24997 IX86_BUILTIN_CMPPD256,
24998 IX86_BUILTIN_CMPPS256,
24999 IX86_BUILTIN_CVTDQ2PD256,
25000 IX86_BUILTIN_CVTDQ2PS256,
25001 IX86_BUILTIN_CVTPD2PS256,
25002 IX86_BUILTIN_CVTPS2DQ256,
25003 IX86_BUILTIN_CVTPS2PD256,
25004 IX86_BUILTIN_CVTTPD2DQ256,
25005 IX86_BUILTIN_CVTPD2DQ256,
25006 IX86_BUILTIN_CVTTPS2DQ256,
25007 IX86_BUILTIN_EXTRACTF128PD256,
25008 IX86_BUILTIN_EXTRACTF128PS256,
25009 IX86_BUILTIN_EXTRACTF128SI256,
25010 IX86_BUILTIN_VZEROALL,
25011 IX86_BUILTIN_VZEROUPPER,
25012 IX86_BUILTIN_VPERMILVARPD,
25013 IX86_BUILTIN_VPERMILVARPS,
25014 IX86_BUILTIN_VPERMILVARPD256,
25015 IX86_BUILTIN_VPERMILVARPS256,
25016 IX86_BUILTIN_VPERMILPD,
25017 IX86_BUILTIN_VPERMILPS,
25018 IX86_BUILTIN_VPERMILPD256,
25019 IX86_BUILTIN_VPERMILPS256,
25020 IX86_BUILTIN_VPERMIL2PD,
25021 IX86_BUILTIN_VPERMIL2PS,
25022 IX86_BUILTIN_VPERMIL2PD256,
25023 IX86_BUILTIN_VPERMIL2PS256,
25024 IX86_BUILTIN_VPERM2F128PD256,
25025 IX86_BUILTIN_VPERM2F128PS256,
25026 IX86_BUILTIN_VPERM2F128SI256,
25027 IX86_BUILTIN_VBROADCASTSS,
25028 IX86_BUILTIN_VBROADCASTSD256,
25029 IX86_BUILTIN_VBROADCASTSS256,
25030 IX86_BUILTIN_VBROADCASTPD256,
25031 IX86_BUILTIN_VBROADCASTPS256,
25032 IX86_BUILTIN_VINSERTF128PD256,
25033 IX86_BUILTIN_VINSERTF128PS256,
25034 IX86_BUILTIN_VINSERTF128SI256,
25035 IX86_BUILTIN_LOADUPD256,
25036 IX86_BUILTIN_LOADUPS256,
25037 IX86_BUILTIN_STOREUPD256,
25038 IX86_BUILTIN_STOREUPS256,
25039 IX86_BUILTIN_LDDQU256,
25040 IX86_BUILTIN_MOVNTDQ256,
25041 IX86_BUILTIN_MOVNTPD256,
25042 IX86_BUILTIN_MOVNTPS256,
25043 IX86_BUILTIN_LOADDQU256,
25044 IX86_BUILTIN_STOREDQU256,
25045 IX86_BUILTIN_MASKLOADPD,
25046 IX86_BUILTIN_MASKLOADPS,
25047 IX86_BUILTIN_MASKSTOREPD,
25048 IX86_BUILTIN_MASKSTOREPS,
25049 IX86_BUILTIN_MASKLOADPD256,
25050 IX86_BUILTIN_MASKLOADPS256,
25051 IX86_BUILTIN_MASKSTOREPD256,
25052 IX86_BUILTIN_MASKSTOREPS256,
25053 IX86_BUILTIN_MOVSHDUP256,
25054 IX86_BUILTIN_MOVSLDUP256,
25055 IX86_BUILTIN_MOVDDUP256,
25057 IX86_BUILTIN_SQRTPD256,
25058 IX86_BUILTIN_SQRTPS256,
25059 IX86_BUILTIN_SQRTPS_NR256,
25060 IX86_BUILTIN_RSQRTPS256,
25061 IX86_BUILTIN_RSQRTPS_NR256,
25063 IX86_BUILTIN_RCPPS256,
25065 IX86_BUILTIN_ROUNDPD256,
25066 IX86_BUILTIN_ROUNDPS256,
25068 IX86_BUILTIN_FLOORPD256,
25069 IX86_BUILTIN_CEILPD256,
25070 IX86_BUILTIN_TRUNCPD256,
25071 IX86_BUILTIN_RINTPD256,
25072 IX86_BUILTIN_ROUNDPD_AZ256,
25073 IX86_BUILTIN_FLOORPS256,
25074 IX86_BUILTIN_CEILPS256,
25075 IX86_BUILTIN_TRUNCPS256,
25076 IX86_BUILTIN_RINTPS256,
25077 IX86_BUILTIN_ROUNDPS_AZ256,
25079 IX86_BUILTIN_UNPCKHPD256,
25080 IX86_BUILTIN_UNPCKLPD256,
25081 IX86_BUILTIN_UNPCKHPS256,
25082 IX86_BUILTIN_UNPCKLPS256,
25084 IX86_BUILTIN_SI256_SI,
25085 IX86_BUILTIN_PS256_PS,
25086 IX86_BUILTIN_PD256_PD,
25087 IX86_BUILTIN_SI_SI256,
25088 IX86_BUILTIN_PS_PS256,
25089 IX86_BUILTIN_PD_PD256,
25091 IX86_BUILTIN_VTESTZPD,
25092 IX86_BUILTIN_VTESTCPD,
25093 IX86_BUILTIN_VTESTNZCPD,
25094 IX86_BUILTIN_VTESTZPS,
25095 IX86_BUILTIN_VTESTCPS,
25096 IX86_BUILTIN_VTESTNZCPS,
25097 IX86_BUILTIN_VTESTZPD256,
25098 IX86_BUILTIN_VTESTCPD256,
25099 IX86_BUILTIN_VTESTNZCPD256,
25100 IX86_BUILTIN_VTESTZPS256,
25101 IX86_BUILTIN_VTESTCPS256,
25102 IX86_BUILTIN_VTESTNZCPS256,
25103 IX86_BUILTIN_PTESTZ256,
25104 IX86_BUILTIN_PTESTC256,
25105 IX86_BUILTIN_PTESTNZC256,
25107 IX86_BUILTIN_MOVMSKPD256,
25108 IX86_BUILTIN_MOVMSKPS256,
25110 /* AVX2 */
25111 IX86_BUILTIN_MPSADBW256,
25112 IX86_BUILTIN_PABSB256,
25113 IX86_BUILTIN_PABSW256,
25114 IX86_BUILTIN_PABSD256,
25115 IX86_BUILTIN_PACKSSDW256,
25116 IX86_BUILTIN_PACKSSWB256,
25117 IX86_BUILTIN_PACKUSDW256,
25118 IX86_BUILTIN_PACKUSWB256,
25119 IX86_BUILTIN_PADDB256,
25120 IX86_BUILTIN_PADDW256,
25121 IX86_BUILTIN_PADDD256,
25122 IX86_BUILTIN_PADDQ256,
25123 IX86_BUILTIN_PADDSB256,
25124 IX86_BUILTIN_PADDSW256,
25125 IX86_BUILTIN_PADDUSB256,
25126 IX86_BUILTIN_PADDUSW256,
25127 IX86_BUILTIN_PALIGNR256,
25128 IX86_BUILTIN_AND256I,
25129 IX86_BUILTIN_ANDNOT256I,
25130 IX86_BUILTIN_PAVGB256,
25131 IX86_BUILTIN_PAVGW256,
25132 IX86_BUILTIN_PBLENDVB256,
25133 IX86_BUILTIN_PBLENDVW256,
25134 IX86_BUILTIN_PCMPEQB256,
25135 IX86_BUILTIN_PCMPEQW256,
25136 IX86_BUILTIN_PCMPEQD256,
25137 IX86_BUILTIN_PCMPEQQ256,
25138 IX86_BUILTIN_PCMPGTB256,
25139 IX86_BUILTIN_PCMPGTW256,
25140 IX86_BUILTIN_PCMPGTD256,
25141 IX86_BUILTIN_PCMPGTQ256,
25142 IX86_BUILTIN_PHADDW256,
25143 IX86_BUILTIN_PHADDD256,
25144 IX86_BUILTIN_PHADDSW256,
25145 IX86_BUILTIN_PHSUBW256,
25146 IX86_BUILTIN_PHSUBD256,
25147 IX86_BUILTIN_PHSUBSW256,
25148 IX86_BUILTIN_PMADDUBSW256,
25149 IX86_BUILTIN_PMADDWD256,
25150 IX86_BUILTIN_PMAXSB256,
25151 IX86_BUILTIN_PMAXSW256,
25152 IX86_BUILTIN_PMAXSD256,
25153 IX86_BUILTIN_PMAXUB256,
25154 IX86_BUILTIN_PMAXUW256,
25155 IX86_BUILTIN_PMAXUD256,
25156 IX86_BUILTIN_PMINSB256,
25157 IX86_BUILTIN_PMINSW256,
25158 IX86_BUILTIN_PMINSD256,
25159 IX86_BUILTIN_PMINUB256,
25160 IX86_BUILTIN_PMINUW256,
25161 IX86_BUILTIN_PMINUD256,
25162 IX86_BUILTIN_PMOVMSKB256,
25163 IX86_BUILTIN_PMOVSXBW256,
25164 IX86_BUILTIN_PMOVSXBD256,
25165 IX86_BUILTIN_PMOVSXBQ256,
25166 IX86_BUILTIN_PMOVSXWD256,
25167 IX86_BUILTIN_PMOVSXWQ256,
25168 IX86_BUILTIN_PMOVSXDQ256,
25169 IX86_BUILTIN_PMOVZXBW256,
25170 IX86_BUILTIN_PMOVZXBD256,
25171 IX86_BUILTIN_PMOVZXBQ256,
25172 IX86_BUILTIN_PMOVZXWD256,
25173 IX86_BUILTIN_PMOVZXWQ256,
25174 IX86_BUILTIN_PMOVZXDQ256,
25175 IX86_BUILTIN_PMULDQ256,
25176 IX86_BUILTIN_PMULHRSW256,
25177 IX86_BUILTIN_PMULHUW256,
25178 IX86_BUILTIN_PMULHW256,
25179 IX86_BUILTIN_PMULLW256,
25180 IX86_BUILTIN_PMULLD256,
25181 IX86_BUILTIN_PMULUDQ256,
25182 IX86_BUILTIN_POR256,
25183 IX86_BUILTIN_PSADBW256,
25184 IX86_BUILTIN_PSHUFB256,
25185 IX86_BUILTIN_PSHUFD256,
25186 IX86_BUILTIN_PSHUFHW256,
25187 IX86_BUILTIN_PSHUFLW256,
25188 IX86_BUILTIN_PSIGNB256,
25189 IX86_BUILTIN_PSIGNW256,
25190 IX86_BUILTIN_PSIGND256,
25191 IX86_BUILTIN_PSLLDQI256,
25192 IX86_BUILTIN_PSLLWI256,
25193 IX86_BUILTIN_PSLLW256,
25194 IX86_BUILTIN_PSLLDI256,
25195 IX86_BUILTIN_PSLLD256,
25196 IX86_BUILTIN_PSLLQI256,
25197 IX86_BUILTIN_PSLLQ256,
25198 IX86_BUILTIN_PSRAWI256,
25199 IX86_BUILTIN_PSRAW256,
25200 IX86_BUILTIN_PSRADI256,
25201 IX86_BUILTIN_PSRAD256,
25202 IX86_BUILTIN_PSRLDQI256,
25203 IX86_BUILTIN_PSRLWI256,
25204 IX86_BUILTIN_PSRLW256,
25205 IX86_BUILTIN_PSRLDI256,
25206 IX86_BUILTIN_PSRLD256,
25207 IX86_BUILTIN_PSRLQI256,
25208 IX86_BUILTIN_PSRLQ256,
25209 IX86_BUILTIN_PSUBB256,
25210 IX86_BUILTIN_PSUBW256,
25211 IX86_BUILTIN_PSUBD256,
25212 IX86_BUILTIN_PSUBQ256,
25213 IX86_BUILTIN_PSUBSB256,
25214 IX86_BUILTIN_PSUBSW256,
25215 IX86_BUILTIN_PSUBUSB256,
25216 IX86_BUILTIN_PSUBUSW256,
25217 IX86_BUILTIN_PUNPCKHBW256,
25218 IX86_BUILTIN_PUNPCKHWD256,
25219 IX86_BUILTIN_PUNPCKHDQ256,
25220 IX86_BUILTIN_PUNPCKHQDQ256,
25221 IX86_BUILTIN_PUNPCKLBW256,
25222 IX86_BUILTIN_PUNPCKLWD256,
25223 IX86_BUILTIN_PUNPCKLDQ256,
25224 IX86_BUILTIN_PUNPCKLQDQ256,
25225 IX86_BUILTIN_PXOR256,
25226 IX86_BUILTIN_MOVNTDQA256,
25227 IX86_BUILTIN_VBROADCASTSS_PS,
25228 IX86_BUILTIN_VBROADCASTSS_PS256,
25229 IX86_BUILTIN_VBROADCASTSD_PD256,
25230 IX86_BUILTIN_VBROADCASTSI256,
25231 IX86_BUILTIN_PBLENDD256,
25232 IX86_BUILTIN_PBLENDD128,
25233 IX86_BUILTIN_PBROADCASTB256,
25234 IX86_BUILTIN_PBROADCASTW256,
25235 IX86_BUILTIN_PBROADCASTD256,
25236 IX86_BUILTIN_PBROADCASTQ256,
25237 IX86_BUILTIN_PBROADCASTB128,
25238 IX86_BUILTIN_PBROADCASTW128,
25239 IX86_BUILTIN_PBROADCASTD128,
25240 IX86_BUILTIN_PBROADCASTQ128,
25241 IX86_BUILTIN_VPERMVARSI256,
25242 IX86_BUILTIN_VPERMDF256,
25243 IX86_BUILTIN_VPERMVARSF256,
25244 IX86_BUILTIN_VPERMDI256,
25245 IX86_BUILTIN_VPERMTI256,
25246 IX86_BUILTIN_VEXTRACT128I256,
25247 IX86_BUILTIN_VINSERT128I256,
25248 IX86_BUILTIN_MASKLOADD,
25249 IX86_BUILTIN_MASKLOADQ,
25250 IX86_BUILTIN_MASKLOADD256,
25251 IX86_BUILTIN_MASKLOADQ256,
25252 IX86_BUILTIN_MASKSTORED,
25253 IX86_BUILTIN_MASKSTOREQ,
25254 IX86_BUILTIN_MASKSTORED256,
25255 IX86_BUILTIN_MASKSTOREQ256,
25256 IX86_BUILTIN_PSLLVV4DI,
25257 IX86_BUILTIN_PSLLVV2DI,
25258 IX86_BUILTIN_PSLLVV8SI,
25259 IX86_BUILTIN_PSLLVV4SI,
25260 IX86_BUILTIN_PSRAVV8SI,
25261 IX86_BUILTIN_PSRAVV4SI,
25262 IX86_BUILTIN_PSRLVV4DI,
25263 IX86_BUILTIN_PSRLVV2DI,
25264 IX86_BUILTIN_PSRLVV8SI,
25265 IX86_BUILTIN_PSRLVV4SI,
25267 IX86_BUILTIN_GATHERSIV2DF,
25268 IX86_BUILTIN_GATHERSIV4DF,
25269 IX86_BUILTIN_GATHERDIV2DF,
25270 IX86_BUILTIN_GATHERDIV4DF,
25271 IX86_BUILTIN_GATHERSIV4SF,
25272 IX86_BUILTIN_GATHERSIV8SF,
25273 IX86_BUILTIN_GATHERDIV4SF,
25274 IX86_BUILTIN_GATHERDIV8SF,
25275 IX86_BUILTIN_GATHERSIV2DI,
25276 IX86_BUILTIN_GATHERSIV4DI,
25277 IX86_BUILTIN_GATHERDIV2DI,
25278 IX86_BUILTIN_GATHERDIV4DI,
25279 IX86_BUILTIN_GATHERSIV4SI,
25280 IX86_BUILTIN_GATHERSIV8SI,
25281 IX86_BUILTIN_GATHERDIV4SI,
25282 IX86_BUILTIN_GATHERDIV8SI,
25284 /* Alternate 4 element gather for the vectorizer where
25285 all operands are 32-byte wide. */
25286 IX86_BUILTIN_GATHERALTSIV4DF,
25287 IX86_BUILTIN_GATHERALTDIV8SF,
25288 IX86_BUILTIN_GATHERALTSIV4DI,
25289 IX86_BUILTIN_GATHERALTDIV8SI,
25291 /* TFmode support builtins. */
25293 IX86_BUILTIN_HUGE_VALQ,
25294 IX86_BUILTIN_FABSQ,
25295 IX86_BUILTIN_COPYSIGNQ,
25297 /* Vectorizer support builtins. */
25298 IX86_BUILTIN_CPYSGNPS,
25299 IX86_BUILTIN_CPYSGNPD,
25300 IX86_BUILTIN_CPYSGNPS256,
25301 IX86_BUILTIN_CPYSGNPD256,
25303 /* FMA4 instructions. */
25304 IX86_BUILTIN_VFMADDSS,
25305 IX86_BUILTIN_VFMADDSD,
25306 IX86_BUILTIN_VFMADDPS,
25307 IX86_BUILTIN_VFMADDPD,
25308 IX86_BUILTIN_VFMADDPS256,
25309 IX86_BUILTIN_VFMADDPD256,
25310 IX86_BUILTIN_VFMADDSUBPS,
25311 IX86_BUILTIN_VFMADDSUBPD,
25312 IX86_BUILTIN_VFMADDSUBPS256,
25313 IX86_BUILTIN_VFMADDSUBPD256,
25315 /* FMA3 instructions. */
25316 IX86_BUILTIN_VFMADDSS3,
25317 IX86_BUILTIN_VFMADDSD3,
25319 /* XOP instructions. */
25320 IX86_BUILTIN_VPCMOV,
25321 IX86_BUILTIN_VPCMOV_V2DI,
25322 IX86_BUILTIN_VPCMOV_V4SI,
25323 IX86_BUILTIN_VPCMOV_V8HI,
25324 IX86_BUILTIN_VPCMOV_V16QI,
25325 IX86_BUILTIN_VPCMOV_V4SF,
25326 IX86_BUILTIN_VPCMOV_V2DF,
25327 IX86_BUILTIN_VPCMOV256,
25328 IX86_BUILTIN_VPCMOV_V4DI256,
25329 IX86_BUILTIN_VPCMOV_V8SI256,
25330 IX86_BUILTIN_VPCMOV_V16HI256,
25331 IX86_BUILTIN_VPCMOV_V32QI256,
25332 IX86_BUILTIN_VPCMOV_V8SF256,
25333 IX86_BUILTIN_VPCMOV_V4DF256,
25335 IX86_BUILTIN_VPPERM,
25337 IX86_BUILTIN_VPMACSSWW,
25338 IX86_BUILTIN_VPMACSWW,
25339 IX86_BUILTIN_VPMACSSWD,
25340 IX86_BUILTIN_VPMACSWD,
25341 IX86_BUILTIN_VPMACSSDD,
25342 IX86_BUILTIN_VPMACSDD,
25343 IX86_BUILTIN_VPMACSSDQL,
25344 IX86_BUILTIN_VPMACSSDQH,
25345 IX86_BUILTIN_VPMACSDQL,
25346 IX86_BUILTIN_VPMACSDQH,
25347 IX86_BUILTIN_VPMADCSSWD,
25348 IX86_BUILTIN_VPMADCSWD,
25350 IX86_BUILTIN_VPHADDBW,
25351 IX86_BUILTIN_VPHADDBD,
25352 IX86_BUILTIN_VPHADDBQ,
25353 IX86_BUILTIN_VPHADDWD,
25354 IX86_BUILTIN_VPHADDWQ,
25355 IX86_BUILTIN_VPHADDDQ,
25356 IX86_BUILTIN_VPHADDUBW,
25357 IX86_BUILTIN_VPHADDUBD,
25358 IX86_BUILTIN_VPHADDUBQ,
25359 IX86_BUILTIN_VPHADDUWD,
25360 IX86_BUILTIN_VPHADDUWQ,
25361 IX86_BUILTIN_VPHADDUDQ,
25362 IX86_BUILTIN_VPHSUBBW,
25363 IX86_BUILTIN_VPHSUBWD,
25364 IX86_BUILTIN_VPHSUBDQ,
25366 IX86_BUILTIN_VPROTB,
25367 IX86_BUILTIN_VPROTW,
25368 IX86_BUILTIN_VPROTD,
25369 IX86_BUILTIN_VPROTQ,
25370 IX86_BUILTIN_VPROTB_IMM,
25371 IX86_BUILTIN_VPROTW_IMM,
25372 IX86_BUILTIN_VPROTD_IMM,
25373 IX86_BUILTIN_VPROTQ_IMM,
25375 IX86_BUILTIN_VPSHLB,
25376 IX86_BUILTIN_VPSHLW,
25377 IX86_BUILTIN_VPSHLD,
25378 IX86_BUILTIN_VPSHLQ,
25379 IX86_BUILTIN_VPSHAB,
25380 IX86_BUILTIN_VPSHAW,
25381 IX86_BUILTIN_VPSHAD,
25382 IX86_BUILTIN_VPSHAQ,
25384 IX86_BUILTIN_VFRCZSS,
25385 IX86_BUILTIN_VFRCZSD,
25386 IX86_BUILTIN_VFRCZPS,
25387 IX86_BUILTIN_VFRCZPD,
25388 IX86_BUILTIN_VFRCZPS256,
25389 IX86_BUILTIN_VFRCZPD256,
25391 IX86_BUILTIN_VPCOMEQUB,
25392 IX86_BUILTIN_VPCOMNEUB,
25393 IX86_BUILTIN_VPCOMLTUB,
25394 IX86_BUILTIN_VPCOMLEUB,
25395 IX86_BUILTIN_VPCOMGTUB,
25396 IX86_BUILTIN_VPCOMGEUB,
25397 IX86_BUILTIN_VPCOMFALSEUB,
25398 IX86_BUILTIN_VPCOMTRUEUB,
25400 IX86_BUILTIN_VPCOMEQUW,
25401 IX86_BUILTIN_VPCOMNEUW,
25402 IX86_BUILTIN_VPCOMLTUW,
25403 IX86_BUILTIN_VPCOMLEUW,
25404 IX86_BUILTIN_VPCOMGTUW,
25405 IX86_BUILTIN_VPCOMGEUW,
25406 IX86_BUILTIN_VPCOMFALSEUW,
25407 IX86_BUILTIN_VPCOMTRUEUW,
25409 IX86_BUILTIN_VPCOMEQUD,
25410 IX86_BUILTIN_VPCOMNEUD,
25411 IX86_BUILTIN_VPCOMLTUD,
25412 IX86_BUILTIN_VPCOMLEUD,
25413 IX86_BUILTIN_VPCOMGTUD,
25414 IX86_BUILTIN_VPCOMGEUD,
25415 IX86_BUILTIN_VPCOMFALSEUD,
25416 IX86_BUILTIN_VPCOMTRUEUD,
25418 IX86_BUILTIN_VPCOMEQUQ,
25419 IX86_BUILTIN_VPCOMNEUQ,
25420 IX86_BUILTIN_VPCOMLTUQ,
25421 IX86_BUILTIN_VPCOMLEUQ,
25422 IX86_BUILTIN_VPCOMGTUQ,
25423 IX86_BUILTIN_VPCOMGEUQ,
25424 IX86_BUILTIN_VPCOMFALSEUQ,
25425 IX86_BUILTIN_VPCOMTRUEUQ,
25427 IX86_BUILTIN_VPCOMEQB,
25428 IX86_BUILTIN_VPCOMNEB,
25429 IX86_BUILTIN_VPCOMLTB,
25430 IX86_BUILTIN_VPCOMLEB,
25431 IX86_BUILTIN_VPCOMGTB,
25432 IX86_BUILTIN_VPCOMGEB,
25433 IX86_BUILTIN_VPCOMFALSEB,
25434 IX86_BUILTIN_VPCOMTRUEB,
25436 IX86_BUILTIN_VPCOMEQW,
25437 IX86_BUILTIN_VPCOMNEW,
25438 IX86_BUILTIN_VPCOMLTW,
25439 IX86_BUILTIN_VPCOMLEW,
25440 IX86_BUILTIN_VPCOMGTW,
25441 IX86_BUILTIN_VPCOMGEW,
25442 IX86_BUILTIN_VPCOMFALSEW,
25443 IX86_BUILTIN_VPCOMTRUEW,
25445 IX86_BUILTIN_VPCOMEQD,
25446 IX86_BUILTIN_VPCOMNED,
25447 IX86_BUILTIN_VPCOMLTD,
25448 IX86_BUILTIN_VPCOMLED,
25449 IX86_BUILTIN_VPCOMGTD,
25450 IX86_BUILTIN_VPCOMGED,
25451 IX86_BUILTIN_VPCOMFALSED,
25452 IX86_BUILTIN_VPCOMTRUED,
25454 IX86_BUILTIN_VPCOMEQQ,
25455 IX86_BUILTIN_VPCOMNEQ,
25456 IX86_BUILTIN_VPCOMLTQ,
25457 IX86_BUILTIN_VPCOMLEQ,
25458 IX86_BUILTIN_VPCOMGTQ,
25459 IX86_BUILTIN_VPCOMGEQ,
25460 IX86_BUILTIN_VPCOMFALSEQ,
25461 IX86_BUILTIN_VPCOMTRUEQ,
25463 /* LWP instructions. */
25464 IX86_BUILTIN_LLWPCB,
25465 IX86_BUILTIN_SLWPCB,
25466 IX86_BUILTIN_LWPVAL32,
25467 IX86_BUILTIN_LWPVAL64,
25468 IX86_BUILTIN_LWPINS32,
25469 IX86_BUILTIN_LWPINS64,
25473 /* BMI instructions. */
25474 IX86_BUILTIN_BEXTR32,
25475 IX86_BUILTIN_BEXTR64,
25478 /* TBM instructions. */
25479 IX86_BUILTIN_BEXTRI32,
25480 IX86_BUILTIN_BEXTRI64,
25482 /* BMI2 instructions. */
25483 IX86_BUILTIN_BZHI32,
25484 IX86_BUILTIN_BZHI64,
25485 IX86_BUILTIN_PDEP32,
25486 IX86_BUILTIN_PDEP64,
25487 IX86_BUILTIN_PEXT32,
25488 IX86_BUILTIN_PEXT64,
25490 /* FSGSBASE instructions. */
25491 IX86_BUILTIN_RDFSBASE32,
25492 IX86_BUILTIN_RDFSBASE64,
25493 IX86_BUILTIN_RDGSBASE32,
25494 IX86_BUILTIN_RDGSBASE64,
25495 IX86_BUILTIN_WRFSBASE32,
25496 IX86_BUILTIN_WRFSBASE64,
25497 IX86_BUILTIN_WRGSBASE32,
25498 IX86_BUILTIN_WRGSBASE64,
25500 /* RDRND instructions. */
25501 IX86_BUILTIN_RDRAND16_STEP,
25502 IX86_BUILTIN_RDRAND32_STEP,
25503 IX86_BUILTIN_RDRAND64_STEP,
25505 /* F16C instructions. */
25506 IX86_BUILTIN_CVTPH2PS,
25507 IX86_BUILTIN_CVTPH2PS256,
25508 IX86_BUILTIN_CVTPS2PH,
25509 IX86_BUILTIN_CVTPS2PH256,
25511 /* CFString built-in for darwin */
25512 IX86_BUILTIN_CFSTRING,
25514 IX86_BUILTIN_MAX
25517 /* Table for the ix86 builtin decls. */
25518 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
25520 /* Table of all of the builtin functions that are possible with different ISA's
25521 but are waiting to be built until a function is declared to use that
25522 ISA. */
25523 struct builtin_isa {
25524 const char *name; /* function name */
25525 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
25526 HOST_WIDE_INT isa; /* isa_flags this builtin is defined for */
25527 bool const_p; /* true if the declaration is constant */
25528 bool set_and_not_built_p; /* true if recorded but the decl is not yet built */
25531 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
25534 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
25535 of which isa_flags to use in the ix86_builtins_isa array. Stores the
25536 function decl in the ix86_builtins array. Returns the function decl or
25537 NULL_TREE, if the builtin was not added.
25539 If the front end has a special hook for builtin functions, delay adding
25540 builtin functions that aren't in the current ISA until the ISA is changed
25541 with function specific optimization. Doing so can save about 300K for the
25542 default compiler. When the builtin is expanded, check at that time whether
25543 it is valid.
25545 If the front end doesn't have a special hook, record all builtins, even if
25546 they aren't in the current ISA, in case the user uses function specific
25547 options for a different ISA, so that we don't get scope errors if a
25548 builtin is added in the middle of a function scope. */
25550 static inline tree
25551 def_builtin (HOST_WIDE_INT mask, const char *name,
25552 enum ix86_builtin_func_type tcode,
25553 enum ix86_builtins code)
25555 tree decl = NULL_TREE;
25557 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
25559 ix86_builtins_isa[(int) code].isa = mask;
25561 mask &= ~OPTION_MASK_ISA_64BIT;
25562 if (mask == 0
25563 || (mask & ix86_isa_flags) != 0
25564 || (lang_hooks.builtin_function
25565 == lang_hooks.builtin_function_ext_scope))
25568 tree type = ix86_get_builtin_func_type (tcode);
25569 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
25570 NULL, NULL_TREE);
25571 ix86_builtins[(int) code] = decl;
25572 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
25574 else
25576 ix86_builtins[(int) code] = NULL_TREE;
25577 ix86_builtins_isa[(int) code].tcode = tcode;
25578 ix86_builtins_isa[(int) code].name = name;
25579 ix86_builtins_isa[(int) code].const_p = false;
25580 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
25584 return decl;
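/* Editorial example, not part of this spot in the sources: the bdesc
   tables below reach def_builtin through calls equivalent to

       def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_addps",
                          V4SF_FTYPE_V4SF_V4SF, IX86_BUILTIN_ADDPS);

   which either builds the decl immediately (ISA enabled, or the front
   end supports ext-scope builtins) or parks the name and type in
   ix86_builtins_isa for ix86_add_new_builtins to materialize later.  */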
25587 /* Like def_builtin, but also marks the function decl "const". */
25589 static inline tree
25590 def_builtin_const (HOST_WIDE_INT mask, const char *name,
25591 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
25593 tree decl = def_builtin (mask, name, tcode, code);
25594 if (decl)
25595 TREE_READONLY (decl) = 1;
25596 else
25597 ix86_builtins_isa[(int) code].const_p = true;
25599 return decl;
25602 /* Add any new builtin functions for a given ISA that may not have been
25603 declared. This saves a bit of space compared to adding all of the
25604 declarations to the tree up front, even when they are never used. */
25606 static void
25607 ix86_add_new_builtins (HOST_WIDE_INT isa)
25611 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
25613 if ((ix86_builtins_isa[i].isa & isa) != 0
25614 && ix86_builtins_isa[i].set_and_not_built_p)
25616 tree decl, type;
25618 /* Don't define the builtin again. */
25619 ix86_builtins_isa[i].set_and_not_built_p = false;
25621 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
25622 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
25623 type, i, BUILT_IN_MD, NULL,
25624 NULL_TREE);
25626 ix86_builtins[i] = decl;
25627 if (ix86_builtins_isa[i].const_p)
25628 TREE_READONLY (decl) = 1;
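/* Editorial example, not part of the GCC sources: the deferral above
   pays off with function-specific ISAs, e.g.

       __attribute__((target ("avx"))) void f (float *p) { ... }

   in a translation unit compiled with plain -msse2; the AVX builtins
   are only materialized, via this function, when the target attribute
   or pragma actually switches ix86_isa_flags.  */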
25633 /* Bits for builtin_description.flag. */
25635 /* Set when we don't support the comparison natively, and should
25636 swap the comparison operands in order to support it. */
25637 #define BUILTIN_DESC_SWAP_OPERANDS 1
25639 struct builtin_description
25641 const HOST_WIDE_INT mask;
25642 const enum insn_code icode;
25643 const char *const name;
25644 const enum ix86_builtins code;
25645 const enum rtx_code comparison;
25646 const int flag;
25649 static const struct builtin_description bdesc_comi[] =
25651 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
25652 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
25653 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
25654 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
25655 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
25656 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
25657 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
25658 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
25659 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
25660 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
25661 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
25662 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
25663 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
25664 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
25665 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
25666 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
25667 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
25668 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
25669 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
25670 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
25671 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
25672 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
25673 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
25674 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
25677 static const struct builtin_description bdesc_pcmpestr[] =
25680 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
25681 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
25682 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
25683 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
25684 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
25685 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
25686 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
25689 static const struct builtin_description bdesc_pcmpistr[] =
25692 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
25693 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
25694 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
25695 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
25696 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
25697 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
25698 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
25701 /* Special builtins with variable number of arguments. */
25702 static const struct builtin_description bdesc_special_args[] =
25704 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
25705 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
25706 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_pause, "__builtin_ia32_pause", IX86_BUILTIN_PAUSE, UNKNOWN, (int) VOID_FTYPE_VOID },
25708 /* MMX */
25709 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
25711 /* 3DNow! */
25712 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
25714 /* SSE */
25715 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
25716 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
25717 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
25719 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
25720 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
25721 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
25722 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
25724 /* SSE or 3DNow!A */
25725 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
25726 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
25728 /* SSE2 */
25729 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
25730 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
25731 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
25732 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
25733 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
25734 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
25735 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
25736 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
25737 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
25739 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
25740 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
25742 /* SSE3 */
25743 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
25745 /* SSE4.1 */
25746 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
25748 /* SSE4A */
25749 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
25750 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
25752 /* AVX */
25753 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
25754 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
25756 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
25757 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
25758 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
25759 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
25760 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
25762 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
25763 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
25764 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
25765 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
25766 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
25767 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
25768 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
25770 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
25771 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
25772 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
25774 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DI },
25775 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SI },
25776 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DI },
25777 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SI },
25778 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DI_V2DF },
25779 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SI_V4SF },
25780 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DI_V4DF },
25781 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SI_V8SF },
25783 /* AVX2 */
25784 { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_movntdqa, "__builtin_ia32_movntdqa256", IX86_BUILTIN_MOVNTDQA256, UNKNOWN, (int) V4DI_FTYPE_PV4DI },
25785 { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadd, "__builtin_ia32_maskloadd", IX86_BUILTIN_MASKLOADD, UNKNOWN, (int) V4SI_FTYPE_PCV4SI_V4SI },
25786 { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadq, "__builtin_ia32_maskloadq", IX86_BUILTIN_MASKLOADQ, UNKNOWN, (int) V2DI_FTYPE_PCV2DI_V2DI },
25787 { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadd256, "__builtin_ia32_maskloadd256", IX86_BUILTIN_MASKLOADD256, UNKNOWN, (int) V8SI_FTYPE_PCV8SI_V8SI },
25788 { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskloadq256, "__builtin_ia32_maskloadq256", IX86_BUILTIN_MASKLOADQ256, UNKNOWN, (int) V4DI_FTYPE_PCV4DI_V4DI },
25789 { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstored, "__builtin_ia32_maskstored", IX86_BUILTIN_MASKSTORED, UNKNOWN, (int) VOID_FTYPE_PV4SI_V4SI_V4SI },
25790 { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstoreq, "__builtin_ia32_maskstoreq", IX86_BUILTIN_MASKSTOREQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI_V2DI },
25791 { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstored256, "__builtin_ia32_maskstored256", IX86_BUILTIN_MASKSTORED256, UNKNOWN, (int) VOID_FTYPE_PV8SI_V8SI_V8SI },
25792 { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_maskstoreq256, "__builtin_ia32_maskstoreq256", IX86_BUILTIN_MASKSTOREQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI_V4DI },
25794 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
25795 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
25796 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
25797 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
25798 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
25799 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
25801 /* FSGSBASE */
25802 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasesi, "__builtin_ia32_rdfsbase32", IX86_BUILTIN_RDFSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
25803 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasedi, "__builtin_ia32_rdfsbase64", IX86_BUILTIN_RDFSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
25804 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasesi, "__builtin_ia32_rdgsbase32", IX86_BUILTIN_RDGSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
25805 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasedi, "__builtin_ia32_rdgsbase64", IX86_BUILTIN_RDGSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
25806 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasesi, "__builtin_ia32_wrfsbase32", IX86_BUILTIN_WRFSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
25807 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasedi, "__builtin_ia32_wrfsbase64", IX86_BUILTIN_WRFSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
25808 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasesi, "__builtin_ia32_wrgsbase32", IX86_BUILTIN_WRGSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
25809 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasedi, "__builtin_ia32_wrgsbase64", IX86_BUILTIN_WRGSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
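/* Editorial sketch, not part of the GCC sources: reading one row of
   bdesc_special_args above, the __builtin_ia32_loadups entry says that
   whenever SSE is enabled,

       __m128 v = __builtin_ia32_loadups (p);   // p: const float *

   expands through insn pattern CODE_FOR_sse_movups with signature
   V4SF_FTYPE_PCFLOAT, i.e. it returns a V4SF and takes one
   pointer-to-const-float argument.  */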
25812 /* Builtins with variable number of arguments. */
25813 static const struct builtin_description bdesc_args[] =
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
  { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
  { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
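
  /* MMX */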
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },

  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
  { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
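
  /* 3DNow! */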
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },

  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
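
  /* 3DNow!A */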
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
  { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
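
  /* SSE */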
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
  { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },

  /* SSE MMX or 3Dnow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
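
  /* SSE2 */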
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_floatv4siv4sf2, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_fix_truncv4sfv4si2, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
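
  /* SSE2 MMX */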
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
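
  /* SSE3 */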
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
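
  /* SSSE3 */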
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
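
  /* SSE4.1 */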
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_floorpd", IX86_BUILTIN_FLOORPD, (enum rtx_code) ROUND_FLOOR, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_ceilpd", IX86_BUILTIN_CEILPD, (enum rtx_code) ROUND_CEIL, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_truncpd", IX86_BUILTIN_TRUNCPD, (enum rtx_code) ROUND_TRUNC, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_rintpd", IX86_BUILTIN_RINTPD, (enum rtx_code) ROUND_MXCSR, (int) V2DF_FTYPE_V2DF_ROUND },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_roundv2df2, "__builtin_ia32_roundpd_az", IX86_BUILTIN_ROUNDPD_AZ, UNKNOWN, (int) V2DF_FTYPE_V2DF },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_floorps", IX86_BUILTIN_FLOORPS, (enum rtx_code) ROUND_FLOOR, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_ceilps", IX86_BUILTIN_CEILPS, (enum rtx_code) ROUND_CEIL, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_truncps", IX86_BUILTIN_TRUNCPS, (enum rtx_code) ROUND_TRUNC, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_rintps", IX86_BUILTIN_RINTPS, (enum rtx_code) ROUND_MXCSR, (int) V4SF_FTYPE_V4SF_ROUND },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_roundv4sf2, "__builtin_ia32_roundps_az", IX86_BUILTIN_ROUNDPS_AZ, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
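
  /* SSE4.2 */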

  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
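
  /* The crc32 builtins fold one chunk of input into a running checksum
     using the CRC-32C (Castagnoli) polynomial, not the zlib CRC-32 one.
     A minimal byte-wise sketch (buf and len are assumed here):

	unsigned int crc = 0xffffffff;
	size_t i;
	for (i = 0; i < len; i++)
	  crc = __builtin_ia32_crc32qi (crc, buf[i]);
  */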

  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* AES */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* PCLMUL */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
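
  /* For pclmulqdq the immediate picks which 64-bit halves enter the
     carry-less multiply: bit 0 selects the qword of the first source,
     bit 4 the qword of the second.  E.g., via wmmintrin.h,

	__m128i r = _mm_clmulepi64_si128 (a, b, 0x10);

     multiplies the low qword of a by the high qword of b.  */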

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_floatv4siv4df2, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_floatv8siv8sf2, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_fix_truncv4dfv4si2, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_fix_truncv8sfv8si2, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_floorpd256", IX86_BUILTIN_FLOORPD256, (enum rtx_code) ROUND_FLOOR, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_ceilpd256", IX86_BUILTIN_CEILPD256, (enum rtx_code) ROUND_CEIL, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_truncpd256", IX86_BUILTIN_TRUNCPD256, (enum rtx_code) ROUND_TRUNC, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_rintpd256", IX86_BUILTIN_RINTPD256, (enum rtx_code) ROUND_MXCSR, (int) V4DF_FTYPE_V4DF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_roundv4df2, "__builtin_ia32_roundpd_az256", IX86_BUILTIN_ROUNDPD_AZ256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_floorps256", IX86_BUILTIN_FLOORPS256, (enum rtx_code) ROUND_FLOOR, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_ceilps256", IX86_BUILTIN_CEILPS256, (enum rtx_code) ROUND_CEIL, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_truncps256", IX86_BUILTIN_TRUNCPS256, (enum rtx_code) ROUND_TRUNC, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_rintps256", IX86_BUILTIN_RINTPS256, (enum rtx_code) ROUND_MXCSR, (int) V8SF_FTYPE_V8SF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_roundv8sf2, "__builtin_ia32_roundps_az256", IX86_BUILTIN_ROUNDPS_AZ256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv8sf3, "__builtin_ia32_copysignps256", IX86_BUILTIN_CPYSGNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv4df3, "__builtin_ia32_copysignpd256", IX86_BUILTIN_CPYSGNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_pack_sfix_v4df, "__builtin_ia32_vec_pack_sfix256", IX86_BUILTIN_VEC_PACK_SFIX256, UNKNOWN, (int) V8SI_FTYPE_V4DF_V4DF },

  /* AVX2 */
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_mpsadbw, "__builtin_ia32_mpsadbw256", IX86_BUILTIN_MPSADBW256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_absv32qi2, "__builtin_ia32_pabsb256", IX86_BUILTIN_PABSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_absv16hi2, "__builtin_ia32_pabsw256", IX86_BUILTIN_PABSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_absv8si2, "__builtin_ia32_pabsd256", IX86_BUILTIN_PABSD256, UNKNOWN, (int) V8SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packssdw, "__builtin_ia32_packssdw256", IX86_BUILTIN_PACKSSDW256, UNKNOWN, (int) V16HI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packsswb, "__builtin_ia32_packsswb256", IX86_BUILTIN_PACKSSWB256, UNKNOWN, (int) V32QI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packusdw, "__builtin_ia32_packusdw256", IX86_BUILTIN_PACKUSDW256, UNKNOWN, (int) V16HI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_packuswb, "__builtin_ia32_packuswb256", IX86_BUILTIN_PACKUSWB256, UNKNOWN, (int) V32QI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv32qi3, "__builtin_ia32_paddb256", IX86_BUILTIN_PADDB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv16hi3, "__builtin_ia32_paddw256", IX86_BUILTIN_PADDW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv8si3, "__builtin_ia32_paddd256", IX86_BUILTIN_PADDD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_addv4di3, "__builtin_ia32_paddq256", IX86_BUILTIN_PADDQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ssaddv32qi3, "__builtin_ia32_paddsb256", IX86_BUILTIN_PADDSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ssaddv16hi3, "__builtin_ia32_paddsw256", IX86_BUILTIN_PADDSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_usaddv32qi3, "__builtin_ia32_paddusb256", IX86_BUILTIN_PADDUSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_usaddv16hi3, "__builtin_ia32_paddusw256", IX86_BUILTIN_PADDUSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_palignrv2ti, "__builtin_ia32_palignr256", IX86_BUILTIN_PALIGNR256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT_CONVERT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_andv4di3, "__builtin_ia32_andsi256", IX86_BUILTIN_AND256I, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_andnotv4di3, "__builtin_ia32_andnotsi256", IX86_BUILTIN_ANDNOT256I, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_uavgv32qi3, "__builtin_ia32_pavgb256", IX86_BUILTIN_PAVGB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_uavgv16hi3, "__builtin_ia32_pavgw256", IX86_BUILTIN_PAVGW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblendvb, "__builtin_ia32_pblendvb256", IX86_BUILTIN_PBLENDVB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblendw, "__builtin_ia32_pblendw256", IX86_BUILTIN_PBLENDVW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv32qi3, "__builtin_ia32_pcmpeqb256", IX86_BUILTIN_PCMPEQB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv16hi3, "__builtin_ia32_pcmpeqw256", IX86_BUILTIN_PCMPEQW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv8si3, "__builtin_ia32_pcmpeqd256", IX86_BUILTIN_PCMPEQD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_eqv4di3, "__builtin_ia32_pcmpeqq256", IX86_BUILTIN_PCMPEQQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv32qi3, "__builtin_ia32_pcmpgtb256", IX86_BUILTIN_PCMPGTB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv16hi3, "__builtin_ia32_pcmpgtw256", IX86_BUILTIN_PCMPGTW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv8si3, "__builtin_ia32_pcmpgtd256", IX86_BUILTIN_PCMPGTD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_gtv4di3, "__builtin_ia32_pcmpgtq256", IX86_BUILTIN_PCMPGTQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phaddwv16hi3, "__builtin_ia32_phaddw256", IX86_BUILTIN_PHADDW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phadddv8si3, "__builtin_ia32_phaddd256", IX86_BUILTIN_PHADDD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phaddswv16hi3, "__builtin_ia32_phaddsw256", IX86_BUILTIN_PHADDSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phsubwv16hi3, "__builtin_ia32_phsubw256", IX86_BUILTIN_PHSUBW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phsubdv8si3, "__builtin_ia32_phsubd256", IX86_BUILTIN_PHSUBD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_phsubswv16hi3, "__builtin_ia32_phsubsw256", IX86_BUILTIN_PHSUBSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pmaddubsw256, "__builtin_ia32_pmaddubsw256", IX86_BUILTIN_PMADDUBSW256, UNKNOWN, (int) V16HI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pmaddwd, "__builtin_ia32_pmaddwd256", IX86_BUILTIN_PMADDWD256, UNKNOWN, (int) V8SI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smaxv32qi3, "__builtin_ia32_pmaxsb256", IX86_BUILTIN_PMAXSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smaxv16hi3, "__builtin_ia32_pmaxsw256", IX86_BUILTIN_PMAXSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smaxv8si3, "__builtin_ia32_pmaxsd256", IX86_BUILTIN_PMAXSD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umaxv32qi3, "__builtin_ia32_pmaxub256", IX86_BUILTIN_PMAXUB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umaxv16hi3, "__builtin_ia32_pmaxuw256", IX86_BUILTIN_PMAXUW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umaxv8si3, "__builtin_ia32_pmaxud256", IX86_BUILTIN_PMAXUD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_sminv32qi3, "__builtin_ia32_pminsb256", IX86_BUILTIN_PMINSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_sminv16hi3, "__builtin_ia32_pminsw256", IX86_BUILTIN_PMINSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_sminv8si3, "__builtin_ia32_pminsd256", IX86_BUILTIN_PMINSD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_uminv32qi3, "__builtin_ia32_pminub256", IX86_BUILTIN_PMINUB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_uminv16hi3, "__builtin_ia32_pminuw256", IX86_BUILTIN_PMINUW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_uminv8si3, "__builtin_ia32_pminud256", IX86_BUILTIN_PMINUD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pmovmskb, "__builtin_ia32_pmovmskb256", IX86_BUILTIN_PMOVMSKB256, UNKNOWN, (int) INT_FTYPE_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv16qiv16hi2, "__builtin_ia32_pmovsxbw256", IX86_BUILTIN_PMOVSXBW256, UNKNOWN, (int) V16HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv8qiv8si2, "__builtin_ia32_pmovsxbd256", IX86_BUILTIN_PMOVSXBD256, UNKNOWN, (int) V8SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv4qiv4di2, "__builtin_ia32_pmovsxbq256", IX86_BUILTIN_PMOVSXBQ256, UNKNOWN, (int) V4DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv8hiv8si2, "__builtin_ia32_pmovsxwd256", IX86_BUILTIN_PMOVSXWD256, UNKNOWN, (int) V8SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv4hiv4di2, "__builtin_ia32_pmovsxwq256", IX86_BUILTIN_PMOVSXWQ256, UNKNOWN, (int) V4DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sign_extendv4siv4di2, "__builtin_ia32_pmovsxdq256", IX86_BUILTIN_PMOVSXDQ256, UNKNOWN, (int) V4DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv16qiv16hi2, "__builtin_ia32_pmovzxbw256", IX86_BUILTIN_PMOVZXBW256, UNKNOWN, (int) V16HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv8qiv8si2, "__builtin_ia32_pmovzxbd256", IX86_BUILTIN_PMOVZXBD256, UNKNOWN, (int) V8SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv4qiv4di2, "__builtin_ia32_pmovzxbq256", IX86_BUILTIN_PMOVZXBQ256, UNKNOWN, (int) V4DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv8hiv8si2, "__builtin_ia32_pmovzxwd256", IX86_BUILTIN_PMOVZXWD256, UNKNOWN, (int) V8SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv4hiv4di2, "__builtin_ia32_pmovzxwq256", IX86_BUILTIN_PMOVZXWQ256, UNKNOWN, (int) V4DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_zero_extendv4siv4di2, "__builtin_ia32_pmovzxdq256", IX86_BUILTIN_PMOVZXDQ256, UNKNOWN, (int) V4DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_mulv4siv4di3, "__builtin_ia32_pmuldq256", IX86_BUILTIN_PMULDQ256, UNKNOWN, (int) V4DI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_umulhrswv16hi3, "__builtin_ia32_pmulhrsw256", IX86_BUILTIN_PMULHRSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_umulv16hi3_highpart, "__builtin_ia32_pmulhuw256", IX86_BUILTIN_PMULHUW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_smulv16hi3_highpart, "__builtin_ia32_pmulhw256", IX86_BUILTIN_PMULHW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_mulv16hi3, "__builtin_ia32_pmullw256", IX86_BUILTIN_PMULLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_mulv8si3, "__builtin_ia32_pmulld256", IX86_BUILTIN_PMULLD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_umulv4siv4di3, "__builtin_ia32_pmuludq256", IX86_BUILTIN_PMULUDQ256, UNKNOWN, (int) V4DI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_iorv4di3, "__builtin_ia32_por256", IX86_BUILTIN_POR256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psadbw, "__builtin_ia32_psadbw256", IX86_BUILTIN_PSADBW256, UNKNOWN, (int) V16HI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshufbv32qi3, "__builtin_ia32_pshufb256", IX86_BUILTIN_PSHUFB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshufdv3, "__builtin_ia32_pshufd256", IX86_BUILTIN_PSHUFD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshufhwv3, "__builtin_ia32_pshufhw256", IX86_BUILTIN_PSHUFHW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pshuflwv3, "__builtin_ia32_pshuflw256", IX86_BUILTIN_PSHUFLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psignv32qi3, "__builtin_ia32_psignb256", IX86_BUILTIN_PSIGNB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psignv16hi3, "__builtin_ia32_psignw256", IX86_BUILTIN_PSIGNW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_psignv8si3, "__builtin_ia32_psignd256", IX86_BUILTIN_PSIGND256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlv2ti3, "__builtin_ia32_pslldqi256", IX86_BUILTIN_PSLLDQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_CONVERT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv16hi3, "__builtin_ia32_psllwi256", IX86_BUILTIN_PSLLWI256, UNKNOWN, (int) V16HI_FTYPE_V16HI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv16hi3, "__builtin_ia32_psllw256", IX86_BUILTIN_PSLLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V8HI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv8si3, "__builtin_ia32_pslldi256", IX86_BUILTIN_PSLLDI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv8si3, "__builtin_ia32_pslld256", IX86_BUILTIN_PSLLD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv4di3, "__builtin_ia32_psllqi256", IX86_BUILTIN_PSLLQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashlv4di3, "__builtin_ia32_psllq256", IX86_BUILTIN_PSLLQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv16hi3, "__builtin_ia32_psrawi256", IX86_BUILTIN_PSRAWI256, UNKNOWN, (int) V16HI_FTYPE_V16HI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv16hi3, "__builtin_ia32_psraw256", IX86_BUILTIN_PSRAW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V8HI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv8si3, "__builtin_ia32_psradi256", IX86_BUILTIN_PSRADI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_ashrv8si3, "__builtin_ia32_psrad256", IX86_BUILTIN_PSRAD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrv2ti3, "__builtin_ia32_psrldqi256", IX86_BUILTIN_PSRLDQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_CONVERT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv16hi3, "__builtin_ia32_psrlwi256", IX86_BUILTIN_PSRLWI256, UNKNOWN, (int) V16HI_FTYPE_V16HI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv16hi3, "__builtin_ia32_psrlw256", IX86_BUILTIN_PSRLW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V8HI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv8si3, "__builtin_ia32_psrldi256", IX86_BUILTIN_PSRLDI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv8si3, "__builtin_ia32_psrld256", IX86_BUILTIN_PSRLD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv4di3, "__builtin_ia32_psrlqi256", IX86_BUILTIN_PSRLQI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT_COUNT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_lshrv4di3, "__builtin_ia32_psrlq256", IX86_BUILTIN_PSRLQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI_COUNT },
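
  /* The V4DI_FTYPE_V4DI_INT_CONVERT entries above (pslldqi256 and
     psrldqi256) wrap V2TImode shift patterns: the byte-granular shift is
     applied within each 128-bit lane, and the _CONVERT tag asks the
     expander to bitcast the V4DI operands to the insn's V2TI mode.  (This
     is a reading of the expander convention, not something visible in the
     table itself.)  */
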
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv32qi3, "__builtin_ia32_psubb256", IX86_BUILTIN_PSUBB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv16hi3, "__builtin_ia32_psubw256", IX86_BUILTIN_PSUBW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv8si3, "__builtin_ia32_psubd256", IX86_BUILTIN_PSUBD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_subv4di3, "__builtin_ia32_psubq256", IX86_BUILTIN_PSUBQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sssubv32qi3, "__builtin_ia32_psubsb256", IX86_BUILTIN_PSUBSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_sssubv16hi3, "__builtin_ia32_psubsw256", IX86_BUILTIN_PSUBSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ussubv32qi3, "__builtin_ia32_psubusb256", IX86_BUILTIN_PSUBUSB256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ussubv16hi3, "__builtin_ia32_psubusw256", IX86_BUILTIN_PSUBUSW256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv32qi, "__builtin_ia32_punpckhbw256", IX86_BUILTIN_PUNPCKHBW256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv16hi, "__builtin_ia32_punpckhwd256", IX86_BUILTIN_PUNPCKHWD256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv8si, "__builtin_ia32_punpckhdq256", IX86_BUILTIN_PUNPCKHDQ256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_highv4di, "__builtin_ia32_punpckhqdq256", IX86_BUILTIN_PUNPCKHQDQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv32qi, "__builtin_ia32_punpcklbw256", IX86_BUILTIN_PUNPCKLBW256, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv16hi, "__builtin_ia32_punpcklwd256", IX86_BUILTIN_PUNPCKLWD256, UNKNOWN, (int) V16HI_FTYPE_V16HI_V16HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv8si, "__builtin_ia32_punpckldq256", IX86_BUILTIN_PUNPCKLDQ256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_interleave_lowv4di, "__builtin_ia32_punpcklqdq256", IX86_BUILTIN_PUNPCKLQDQ256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_xorv4di3, "__builtin_ia32_pxor256", IX86_BUILTIN_PXOR256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vec_dupv4sf, "__builtin_ia32_vbroadcastss_ps", IX86_BUILTIN_VBROADCASTSS_PS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vec_dupv8sf, "__builtin_ia32_vbroadcastss_ps256", IX86_BUILTIN_VBROADCASTSS_PS256, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vec_dupv4df, "__builtin_ia32_vbroadcastsd_pd256", IX86_BUILTIN_VBROADCASTSD_PD256, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_vbroadcasti128_v4di, "__builtin_ia32_vbroadcastsi256", IX86_BUILTIN_VBROADCASTSI256, UNKNOWN, (int) V4DI_FTYPE_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblenddv4si, "__builtin_ia32_pblendd128", IX86_BUILTIN_PBLENDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pblenddv8si, "__builtin_ia32_pblendd256", IX86_BUILTIN_PBLENDD256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv32qi, "__builtin_ia32_pbroadcastb256", IX86_BUILTIN_PBROADCASTB256, UNKNOWN, (int) V32QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv16hi, "__builtin_ia32_pbroadcastw256", IX86_BUILTIN_PBROADCASTW256, UNKNOWN, (int) V16HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv8si, "__builtin_ia32_pbroadcastd256", IX86_BUILTIN_PBROADCASTD256, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv4di, "__builtin_ia32_pbroadcastq256", IX86_BUILTIN_PBROADCASTQ256, UNKNOWN, (int) V4DI_FTYPE_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv16qi, "__builtin_ia32_pbroadcastb128", IX86_BUILTIN_PBROADCASTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv8hi, "__builtin_ia32_pbroadcastw128", IX86_BUILTIN_PBROADCASTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv4si, "__builtin_ia32_pbroadcastd128", IX86_BUILTIN_PBROADCASTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_pbroadcastv2di, "__builtin_ia32_pbroadcastq128", IX86_BUILTIN_PBROADCASTQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permvarv8si, "__builtin_ia32_permvarsi256", IX86_BUILTIN_VPERMVARSI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permv4df, "__builtin_ia32_permdf256", IX86_BUILTIN_VPERMDF256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permvarv8sf, "__builtin_ia32_permvarsf256", IX86_BUILTIN_VPERMVARSF256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permv4di, "__builtin_ia32_permdi256", IX86_BUILTIN_VPERMDI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_permv2ti, "__builtin_ia32_permti256", IX86_BUILTIN_VPERMTI256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_extracti128, "__builtin_ia32_extract128i256", IX86_BUILTIN_VEXTRACT128I256, UNKNOWN, (int) V2DI_FTYPE_V4DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_inserti128, "__builtin_ia32_insert128i256", IX86_BUILTIN_VINSERT128I256, UNKNOWN, (int) V4DI_FTYPE_V4DI_V2DI_INT },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv4di, "__builtin_ia32_psllv4di", IX86_BUILTIN_PSLLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv2di, "__builtin_ia32_psllv2di", IX86_BUILTIN_PSLLVV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv8si, "__builtin_ia32_psllv8si", IX86_BUILTIN_PSLLVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashlvv4si, "__builtin_ia32_psllv4si", IX86_BUILTIN_PSLLVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashrvv8si, "__builtin_ia32_psrav8si", IX86_BUILTIN_PSRAVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_ashrvv4si, "__builtin_ia32_psrav4si", IX86_BUILTIN_PSRAVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv4di, "__builtin_ia32_psrlv4di", IX86_BUILTIN_PSRLVV4DI, UNKNOWN, (int) V4DI_FTYPE_V4DI_V4DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv2di, "__builtin_ia32_psrlv2di", IX86_BUILTIN_PSRLVV2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv8si, "__builtin_ia32_psrlv8si", IX86_BUILTIN_PSRLVV8SI, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI },
  { OPTION_MASK_ISA_AVX2, CODE_FOR_avx2_lshrvv4si, "__builtin_ia32_psrlv4si", IX86_BUILTIN_PSRLVV4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_LZCNT, CODE_FOR_clzhi2_lzcnt, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },

  /* BMI */
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_si, "__builtin_ia32_bextr_u32", IX86_BUILTIN_BEXTR32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_di, "__builtin_ia32_bextr_u64", IX86_BUILTIN_BEXTR64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI, CODE_FOR_ctzhi2, "__builtin_ctzs", IX86_BUILTIN_CTZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },

  /* TBM */
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_si, "__builtin_ia32_bextri_u32", IX86_BUILTIN_BEXTRI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_di, "__builtin_ia32_bextri_u64", IX86_BUILTIN_BEXTRI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
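
  /* BEXTR's second operand packs a control word: the start bit in bits
     7:0 and the field length in bits 15:8 (TBM's bextri takes the same
     encoding as an immediate).  Sketch, extracting 8 bits starting at
     bit 4 of x:

	unsigned int field = __builtin_ia32_bextr_u32 (x, (8 << 8) | 4);
  */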

  /* F16C */
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps, "__builtin_ia32_vcvtph2ps", IX86_BUILTIN_CVTPH2PS, UNKNOWN, (int) V4SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps256, "__builtin_ia32_vcvtph2ps256", IX86_BUILTIN_CVTPH2PS256, UNKNOWN, (int) V8SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph, "__builtin_ia32_vcvtps2ph", IX86_BUILTIN_CVTPS2PH, UNKNOWN, (int) V8HI_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph256, "__builtin_ia32_vcvtps2ph256", IX86_BUILTIN_CVTPS2PH256, UNKNOWN, (int) V8HI_FTYPE_V8SF_INT },

  /* BMI2 */
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_bzhi_si3, "__builtin_ia32_bzhi_si", IX86_BUILTIN_BZHI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_bzhi_di3, "__builtin_ia32_bzhi_di", IX86_BUILTIN_BZHI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pdep_si3, "__builtin_ia32_pdep_si", IX86_BUILTIN_PDEP32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pdep_di3, "__builtin_ia32_pdep_di", IX86_BUILTIN_PDEP64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pext_si3, "__builtin_ia32_pext_si", IX86_BUILTIN_PEXT32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI2, CODE_FOR_bmi2_pext_di3, "__builtin_ia32_pext_di", IX86_BUILTIN_PEXT64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
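
  /* pdep scatters the low bits of its first operand to the positions of
     the set bits in the mask; pext gathers the masked bits and packs
     them at the bottom.  Worked example:

	__builtin_ia32_pdep_si (0xb, 0xf0)  == 0xb0
	__builtin_ia32_pext_si (0xb2, 0xf0) == 0xb
  */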
};

/* FMA4 and XOP.  */
#define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
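
/* Decoding the MULTI_ARG_* names: the digit is the operand count, the
   element tag (SF, DF, SI, DI, HI, QI) names the element type, and a
   trailing 2 marks the 256-bit form; _IMM, _CMP and _TF tag the
   immediate-count, comparison and (as used by the constant-true/false
   vpcom forms) trueflag variants.  For example MULTI_ARG_3_SF2 is
   V8SF_FTYPE_V8SF_V8SF_V8SF, i.e. a builtin of type

	v8sf f (v8sf, v8sf, v8sf);
*/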

static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v4sf,
    "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v2df,
    "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD,
    UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA, CODE_FOR_fmai_vmfmadd_v4sf,
    "__builtin_ia32_vfmaddss3", IX86_BUILTIN_VFMADDSS3,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA, CODE_FOR_fmai_vmfmadd_v2df,
    "__builtin_ia32_vfmaddsd3", IX86_BUILTIN_VFMADDSD3,
    UNKNOWN, (int)MULTI_ARG_3_DF },

  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4sf,
    "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v2df,
    "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD,
    UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v8sf,
    "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256,
    UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4df,
    "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256,
    UNKNOWN, (int)MULTI_ARG_3_DF2 },

  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4sf,
    "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS,
    UNKNOWN, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v2df,
    "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD,
    UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v8sf,
    "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256,
    UNKNOWN, (int)MULTI_ARG_3_SF2 },
  { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4df,
    "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256,
    UNKNOWN, (int)MULTI_ARG_3_DF2 },
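
  /* Everything in this group is enabled for both FMA and FMA4: the two
     ISA masks share the fma4i_* expanders, and only the instruction
     finally emitted differs.  At the user level, __builtin_ia32_vfmaddps
     is the builtin behind the FMA4 intrinsic _mm_macc_ps in fma4intrin.h
     (the intrinsic name is quoted from the headers, not recorded in this
     table).  */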

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
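
  /* vpcmov is a bitwise select; with s the selector operand,

	r = (a & s) | (b & ~s);

     which is why a single pattern can back all the per-width _v* aliases
     above.  */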

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shav16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_shlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
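
  /* The XOP shifts take a per-element count vector, and a negative count
     shifts the other way: vpsha* shift arithmetically, vpshl* logically.
     Sketch via xopintrin.h (assuming that header's _mm_sha_epi32
     wrapper):

	__m128i r = _mm_sha_epi32 (x, counts);

     where each lane of counts may be positive (left) or negative
     (right).  */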

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },

  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
  { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
26827 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
26828 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
26829 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
26830 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
26831 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
26832 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
26833 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
26835 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
26836 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
26837 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
26838 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
26839 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
26840 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
26841 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
26843 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
26844 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
26845 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
26846 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
26847 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
26848 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
26849 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
26851 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
26852 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
26853 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
26854 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
26855 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
26856 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
26857 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
26859 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
26860 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
26861 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
26862 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
26863 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub",IX86_BUILTIN_VPCOMFALSEUB,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
26864 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw",IX86_BUILTIN_VPCOMFALSEUW,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
26865 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud",IX86_BUILTIN_VPCOMFALSEUD,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
26866 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq",IX86_BUILTIN_VPCOMFALSEUQ,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
26868 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
26869 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
26870 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
26871 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
26872 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
26873 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
26874 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
26875 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
26877 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
26878 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
26879 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
26880 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
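/* For illustration (a sketch, not from the original sources): the "neq"
   spellings in the table above deliberately reuse the "ne" enumerators,
   so __builtin_ia32_vpcomneqb and __builtin_ia32_vpcomneb are aliases for
   the same comparison.  At the user level, assuming -mxop, a call might
   look roughly like

     typedef char v16qi __attribute__ ((vector_size (16)));
     v16qi bytes_equal (v16qi a, v16qi b)
     {
       return __builtin_ia32_vpcomeqb (a, b);  // all-ones lanes where equal
     }

   The EQ/NE/LT/... code in each entry is routed to the expander as the
   SUB_CODE argument of ix86_expand_multi_arg_builtin below.  */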
/* TM vector builtins.  */

/* Reuse the existing x86-specific `struct builtin_description' because
   we're lazy.  Add casts to make them fit.  */
static const struct builtin_description bdesc_tm[] =
{
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WM64", (enum ix86_builtins) BUILT_IN_TM_STORE_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WaRM64", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WaWM64", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RaRM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RaWM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RfWM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },

  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WM128", (enum ix86_builtins) BUILT_IN_TM_STORE_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WaRM128", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WaWM128", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RaRM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RaWM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RfWM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WM256", (enum ix86_builtins) BUILT_IN_TM_STORE_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WaRM256", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WaWM256", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RaRM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RaWM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RfWM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },

  { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_LM64", (enum ix86_builtins) BUILT_IN_TM_LOG_M64, UNKNOWN, VOID_FTYPE_PCVOID },
  { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_LM128", (enum ix86_builtins) BUILT_IN_TM_LOG_M128, UNKNOWN, VOID_FTYPE_PCVOID },
  { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_LM256", (enum ix86_builtins) BUILT_IN_TM_LOG_M256, UNKNOWN, VOID_FTYPE_PCVOID },
};
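/* For illustration (a sketch, not from the original sources): each entry
   above does double duty.  ix86_init_tm_builtins below registers the
   builtin under its "__builtin__ITM_*" name but gives it the assembler
   name with the "__builtin_" prefix stripped, i.e. "_ITM_WM64" and
   friends, which are the vector read/write/log entry points the libitm
   runtime ABI expects instrumented code to call.  */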
/* TM callbacks.  */

/* Return the builtin decl needed to load a vector of TYPE.  */

static tree
ix86_builtin_tm_load (tree type)
{
  if (TREE_CODE (type) == VECTOR_TYPE)
    {
      switch (tree_low_cst (TYPE_SIZE (type), 1))
        {
        case 64:
          return builtin_decl_explicit (BUILT_IN_TM_LOAD_M64);
        case 128:
          return builtin_decl_explicit (BUILT_IN_TM_LOAD_M128);
        case 256:
          return builtin_decl_explicit (BUILT_IN_TM_LOAD_M256);
        }
    }
  return NULL_TREE;
}

/* Return the builtin decl needed to store a vector of TYPE.  */

static tree
ix86_builtin_tm_store (tree type)
{
  if (TREE_CODE (type) == VECTOR_TYPE)
    {
      switch (tree_low_cst (TYPE_SIZE (type), 1))
        {
        case 64:
          return builtin_decl_explicit (BUILT_IN_TM_STORE_M64);
        case 128:
          return builtin_decl_explicit (BUILT_IN_TM_STORE_M128);
        case 256:
          return builtin_decl_explicit (BUILT_IN_TM_STORE_M256);
        }
    }
  return NULL_TREE;
}
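/* For illustration (a sketch, assuming the usual hook wiring elsewhere in
   this file): these two callbacks are expected to back the
   TARGET_VECTORIZE_BUILTIN_TM_LOAD / TARGET_VECTORIZE_BUILTIN_TM_STORE
   hooks, so that trans-mem lowering can ask for a whole-vector accessor,
   roughly

     tree fn = targetm.vectorize.builtin_tm_store (vectype);
     if (fn)
       ... emit a call to FN instead of element-wise TM stores ...

   A NULL_TREE result tells the caller no vector entry point exists for
   that size and it must fall back to narrower accesses.  */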
/* Initialize the transactional memory vector load/store builtins.  */

static void
ix86_init_tm_builtins (void)
{
  enum ix86_builtin_func_type ftype;
  const struct builtin_description *d;
  size_t i;
  tree decl;
  tree attrs_load, attrs_type_load, attrs_store, attrs_type_store;
  tree attrs_log, attrs_type_log;

  if (!flag_tm)
    return;

  /* Use whatever attributes a normal TM load has.  */
  decl = builtin_decl_explicit (BUILT_IN_TM_LOAD_1);
  attrs_load = DECL_ATTRIBUTES (decl);
  attrs_type_load = TYPE_ATTRIBUTES (TREE_TYPE (decl));
  /* Use whatever attributes a normal TM store has.  */
  decl = builtin_decl_explicit (BUILT_IN_TM_STORE_1);
  attrs_store = DECL_ATTRIBUTES (decl);
  attrs_type_store = TYPE_ATTRIBUTES (TREE_TYPE (decl));
  /* Use whatever attributes a normal TM log has.  */
  decl = builtin_decl_explicit (BUILT_IN_TM_LOG);
  attrs_log = DECL_ATTRIBUTES (decl);
  attrs_type_log = TYPE_ATTRIBUTES (TREE_TYPE (decl));

  for (i = 0, d = bdesc_tm;
       i < ARRAY_SIZE (bdesc_tm);
       i++, d++)
    {
      if ((d->mask & ix86_isa_flags) != 0
	  || (lang_hooks.builtin_function
	      == lang_hooks.builtin_function_ext_scope))
	{
	  tree type, attrs, attrs_type;
	  enum built_in_function code = (enum built_in_function) d->code;

	  ftype = (enum ix86_builtin_func_type) d->flag;
	  type = ix86_get_builtin_func_type (ftype);

	  if (BUILTIN_TM_LOAD_P (code))
	    {
	      attrs = attrs_load;
	      attrs_type = attrs_type_load;
	    }
	  else if (BUILTIN_TM_STORE_P (code))
	    {
	      attrs = attrs_store;
	      attrs_type = attrs_type_store;
	    }
	  else
	    {
	      attrs = attrs_log;
	      attrs_type = attrs_type_log;
	    }
	  decl = add_builtin_function (d->name, type, code, BUILT_IN_NORMAL,
				       /* The builtin without the prefix for
					  calling it directly.  */
				       d->name + strlen ("__builtin_"),
				       attrs);
	  /* add_builtin_function () will set the DECL_ATTRIBUTES; now
	     set the TYPE_ATTRIBUTES as well.  */
	  decl_attributes (&TREE_TYPE (decl), attrs_type, ATTR_FLAG_BUILT_IN);

	  set_builtin_decl (code, decl, false);
	}
    }
}
/* Set up all the MMX/SSE builtins, even builtins for instructions that are
   not in the current target ISA, to allow the user to compile particular
   modules with target-specific options that differ from the command-line
   options.  */
static void
ix86_init_mmx_sse_builtins (void)
{
  const struct builtin_description * d;
  enum ix86_builtin_func_type ftype;
  size_t i;

  /* Add all special builtins with variable number of operands.  */
  for (i = 0, d = bdesc_special_args;
       i < ARRAY_SIZE (bdesc_special_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin (d->mask, d->name, ftype, d->code);
    }

  /* Add all builtins with variable number of operands.  */
  for (i = 0, d = bdesc_args;
       i < ARRAY_SIZE (bdesc_args);
       i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpestr[im] insns.  */
  for (i = 0, d = bdesc_pcmpestr;
       i < ARRAY_SIZE (bdesc_pcmpestr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPESTRM128)
	ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
      else
	ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* pcmpistr[im] insns.  */
  for (i = 0, d = bdesc_pcmpistr;
       i < ARRAY_SIZE (bdesc_pcmpistr);
       i++, d++)
    {
      if (d->code == IX86_BUILTIN_PCMPISTRM128)
	ftype = V16QI_FTYPE_V16QI_V16QI_INT;
      else
	ftype = INT_FTYPE_V16QI_V16QI_INT;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* comi/ucomi insns.  */
  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    {
      if (d->mask == OPTION_MASK_ISA_SSE2)
	ftype = INT_FTYPE_V2DF_V2DF;
      else
	ftype = INT_FTYPE_V4SF_V4SF;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }

  /* SSE */
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
	       VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
	       UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);

  /* SSE or 3DNow!A */
  def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
	       "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
	       IX86_BUILTIN_MASKMOVQ);

  /* SSE2 */
  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
	       VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);

  def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
	       VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
  x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
			    VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);

  /* SSE3.  */
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
	       VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
  def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
	       VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);

  /* AES */
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
		     V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
		     V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
  def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
		     V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);

  /* PCLMUL */
  def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
		     V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);

  /* RDRND */
  def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand16_step",
	       INT_FTYPE_PUSHORT, IX86_BUILTIN_RDRAND16_STEP);
  def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand32_step",
	       INT_FTYPE_PUNSIGNED, IX86_BUILTIN_RDRAND32_STEP);
  def_builtin (OPTION_MASK_ISA_RDRND | OPTION_MASK_ISA_64BIT,
	       "__builtin_ia32_rdrand64_step", INT_FTYPE_PULONGLONG,
	       IX86_BUILTIN_RDRAND64_STEP);
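  /* For illustration (a sketch, not from this file): the *_step builtins
     return the carry flag of the RDRAND instruction, so the intrinsic
     wrapper in an immintrin.h-style header is expected to look roughly
     like

       extern __inline int
       __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
       _rdrand32_step (unsigned int *__P)
       {
         return __builtin_ia32_rdrand32_step (__P);
       }

     i.e. it yields 1 on success and 0 when no random data was ready.  */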
  /* AVX2 */
  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv2df",
	       V2DF_FTYPE_V2DF_PCDOUBLE_V4SI_V2DF_INT,
	       IX86_BUILTIN_GATHERSIV2DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4df",
	       V4DF_FTYPE_V4DF_PCDOUBLE_V4SI_V4DF_INT,
	       IX86_BUILTIN_GATHERSIV4DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv2df",
	       V2DF_FTYPE_V2DF_PCDOUBLE_V2DI_V2DF_INT,
	       IX86_BUILTIN_GATHERDIV2DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4df",
	       V4DF_FTYPE_V4DF_PCDOUBLE_V4DI_V4DF_INT,
	       IX86_BUILTIN_GATHERDIV4DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4sf",
	       V4SF_FTYPE_V4SF_PCFLOAT_V4SI_V4SF_INT,
	       IX86_BUILTIN_GATHERSIV4SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv8sf",
	       V8SF_FTYPE_V8SF_PCFLOAT_V8SI_V8SF_INT,
	       IX86_BUILTIN_GATHERSIV8SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4sf",
	       V4SF_FTYPE_V4SF_PCFLOAT_V2DI_V4SF_INT,
	       IX86_BUILTIN_GATHERDIV4SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4sf256",
	       V4SF_FTYPE_V4SF_PCFLOAT_V4DI_V4SF_INT,
	       IX86_BUILTIN_GATHERDIV8SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv2di",
	       V2DI_FTYPE_V2DI_PCINT64_V4SI_V2DI_INT,
	       IX86_BUILTIN_GATHERSIV2DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4di",
	       V4DI_FTYPE_V4DI_PCINT64_V4SI_V4DI_INT,
	       IX86_BUILTIN_GATHERSIV4DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv2di",
	       V2DI_FTYPE_V2DI_PCINT64_V2DI_V2DI_INT,
	       IX86_BUILTIN_GATHERDIV2DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4di",
	       V4DI_FTYPE_V4DI_PCINT64_V4DI_V4DI_INT,
	       IX86_BUILTIN_GATHERDIV4DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv4si",
	       V4SI_FTYPE_V4SI_PCINT_V4SI_V4SI_INT,
	       IX86_BUILTIN_GATHERSIV4SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gathersiv8si",
	       V8SI_FTYPE_V8SI_PCINT_V8SI_V8SI_INT,
	       IX86_BUILTIN_GATHERSIV8SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4si",
	       V4SI_FTYPE_V4SI_PCINT_V2DI_V4SI_INT,
	       IX86_BUILTIN_GATHERDIV4SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatherdiv4si256",
	       V4SI_FTYPE_V4SI_PCINT_V4DI_V4SI_INT,
	       IX86_BUILTIN_GATHERDIV8SI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltsiv4df ",
	       V4DF_FTYPE_V4DF_PCDOUBLE_V8SI_V4DF_INT,
	       IX86_BUILTIN_GATHERALTSIV4DF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltdiv4sf256 ",
	       V8SF_FTYPE_V8SF_PCFLOAT_V4DI_V8SF_INT,
	       IX86_BUILTIN_GATHERALTDIV8SF);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltsiv4di ",
	       V4DI_FTYPE_V4DI_PCINT64_V8SI_V4DI_INT,
	       IX86_BUILTIN_GATHERALTSIV4DI);

  def_builtin (OPTION_MASK_ISA_AVX2, "__builtin_ia32_gatheraltdiv4si256 ",
	       V8SI_FTYPE_V8SI_PCINT_V4DI_V8SI_INT,
	       IX86_BUILTIN_GATHERALTDIV8SI);
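  /* For illustration (a sketch of the shared gather shape; the actual
     prototype comes from the V?_FTYPE_... codes above): every gather
     builtin takes (src, base, index-vector, mask, scale), e.g.

       V2DF __builtin_ia32_gathersiv2df (V2DF src, double const *base,
                                         V4SI index, V2DF mask, int scale);

     Lanes whose mask sign bit is set are loaded from
     base + index[i] * scale; the remaining lanes keep the corresponding
     SRC element, and SCALE must be 1, 2, 4 or 8.  */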
  /* MMX access to the vec_init patterns.  */
  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
		     V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
		     V4HI_FTYPE_HI_HI_HI_HI,
		     IX86_BUILTIN_VEC_INIT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
		     V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
		     IX86_BUILTIN_VEC_INIT_V8QI);

  /* Access to the vec_extract patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
		     DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
		     DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
  def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
		     FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
		     SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
		     HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_ext_v4hi",
		     HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);

  def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
		     SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
		     QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);

  /* Access to the vec_set patterns.  */
  def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
		     "__builtin_ia32_vec_set_v2di",
		     V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
		     V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
		     V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);

  def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
		     V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);

  def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
		     "__builtin_ia32_vec_set_v4hi",
		     V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);

  def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
		     V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);

  /* Add FMA4/XOP multi-arg builtin instructions.  */
  for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
    {
      if (d->name == 0)
	continue;

      ftype = (enum ix86_builtin_func_type) d->flag;
      def_builtin_const (d->mask, d->name, ftype, d->code);
    }
}
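/* For illustration (a sketch, not from the original sources):
   def_builtin_const, unlike plain def_builtin, marks the declaration
   TREE_READONLY, so a pair of identical calls such as

     x = __builtin_ia32_vfrczps (a);
     y = __builtin_ia32_vfrczps (a);

   can be commoned by the optimizers.  Builtins with side effects or
   memory reads (mfence, mwait, the gathers above) stay on the plain
   def_builtin path.  */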
/* Internal method for ix86_init_builtins.  */

static void
ix86_init_builtins_va_builtins_abi (void)
{
  tree ms_va_ref, sysv_va_ref;
  tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
  tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
  tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
  tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;

  if (!TARGET_64BIT)
    return;
  fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
  fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
  ms_va_ref = build_reference_type (ms_va_list_type_node);
  sysv_va_ref =
    build_pointer_type (TREE_TYPE (sysv_va_list_type_node));

  fnvoid_va_end_ms =
    build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_start_ms =
    build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
  fnvoid_va_end_sysv =
    build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
  fnvoid_va_start_sysv =
    build_varargs_function_type_list (void_type_node, sysv_va_ref,
				      NULL_TREE);
  fnvoid_va_copy_ms =
    build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
			      NULL_TREE);
  fnvoid_va_copy_sysv =
    build_function_type_list (void_type_node, sysv_va_ref,
			      sysv_va_ref, NULL_TREE);

  add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
  add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
			BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
			BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
  add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
			BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
}
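/* For illustration (a sketch, not from the original sources): with these
   registered, 64-bit code can open a varargs list for the "other" ABI
   explicitly, e.g. an ms_abi function compiled on a SysV host:

     void __attribute__ ((ms_abi)) f (int n, ...)
     {
       __builtin_ms_va_list ap;
       __builtin_ms_va_start (ap, n);
       ...
       __builtin_ms_va_end (ap);
     }
*/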
static void
ix86_init_builtin_types (void)
{
  tree float128_type_node, float80_type_node;

  /* The __float80 type.  */
  float80_type_node = long_double_type_node;
  if (TYPE_MODE (float80_type_node) != XFmode)
    {
      /* The __float80 type.  */
      float80_type_node = make_node (REAL_TYPE);

      TYPE_PRECISION (float80_type_node) = 80;
      layout_type (float80_type_node);
    }
  lang_hooks.types.register_builtin_type (float80_type_node, "__float80");

  /* The __float128 type.  */
  float128_type_node = make_node (REAL_TYPE);
  TYPE_PRECISION (float128_type_node) = 128;
  layout_type (float128_type_node);
  lang_hooks.types.register_builtin_type (float128_type_node, "__float128");

  /* This macro is built by i386-builtin-types.awk.  */
  DEFINE_BUILTIN_PRIMITIVE_TYPES;
}
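/* For illustration (a sketch, not from the original sources): once these
   are registered, x86 C code can spell the extended types directly,
   roughly

     __float128 q = 1.0q / 3.0q;   // TFmode, software emulated
     __float80  e = 1.0w;          // XFmode, x87 extended precision

   where the 'q' and 'w' literal suffixes are the GCC extensions paired
   with these types.  */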
static void
ix86_init_builtins (void)
{
  tree t;

  ix86_init_builtin_types ();

  /* TFmode support builtins.  */
  def_builtin_const (0, "__builtin_infq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
  def_builtin_const (0, "__builtin_huge_valq",
		     FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);

  /* We will expand them to normal calls if SSE2 isn't available, since
     they are used by libgcc.  */
  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
  t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
			    BUILT_IN_MD, "__fabstf2", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;

  t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
  t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
			    BUILT_IN_MD, "__copysigntf3", NULL_TREE);
  TREE_READONLY (t) = 1;
  ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;

  ix86_init_tm_builtins ();
  ix86_init_mmx_sse_builtins ();

  if (TARGET_64BIT)
    ix86_init_builtins_va_builtins_abi ();

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
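/* For illustration (a sketch, not from the original sources):
   __builtin_fabsq and __builtin_copysignq carry the library names
   "__fabstf2" and "__copysigntf3", so

     __float128 r = __builtin_fabsq (x);

   can fall back to a libgcc call when no suitable SSE2 sequence is
   available, instead of open-coding a TFmode absolute value.  */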
/* Return the ix86 builtin for CODE.  */

static tree
ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= IX86_BUILTIN_MAX)
    return error_mark_node;

  return ix86_builtins[code];
}
/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */

static rtx
safe_vector_operand (rtx x, enum machine_mode mode)
{
  if (x == const0_rtx)
    x = CONST0_RTX (mode);
  return x;
}
/* Subroutine of ix86_expand_builtin to take care of binop insns.  */

static rtx
ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if (GET_MODE (op1) == SImode && mode1 == TImode)
    {
      rtx x = gen_reg_rtx (V4SImode);
      emit_insn (gen_sse2_loadd (x, op1));
      op1 = gen_lowpart (TImode, x);
    }

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!insn_data[icode].operand[2].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
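/* For illustration (a sketch, not from the original sources): for a plain
   binop such as __builtin_ia32_paddw128 (icode CODE_FOR_addv8hi3), the
   flow above expands both call arguments to rtx, coerces them into
   V8HImode registers whenever the insn predicates reject them, and emits
   a single

     (set (reg:V8HI target) (plus:V8HI (reg:V8HI op0) (reg:V8HI op1)))

   The SImode-to-TImode fix-up appears intended for patterns whose second
   operand is TImode while the user-level argument is a plain int: the
   32-bit value is loaded into a V4SI register and reinterpreted.  */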
/* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns.  */

static rtx
ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
			       enum ix86_builtin_func_type m_type,
			       enum rtx_code sub_code)
{
  rtx pat;
  unsigned int i, nargs;
  bool comparison_p = false;
  bool tf_p = false;
  bool last_arg_constant = false;
  int num_memory = 0;
  struct
    {
      rtx op;
      enum machine_mode mode;
    } args[4];

  enum machine_mode tmode = insn_data[icode].operand[0].mode;

  switch (m_type)
    {
    case MULTI_ARG_4_DF2_DI_I:
    case MULTI_ARG_4_DF2_DI_I1:
    case MULTI_ARG_4_SF2_SI_I:
    case MULTI_ARG_4_SF2_SI_I1:
      nargs = 4;
      last_arg_constant = true;
      break;

    case MULTI_ARG_3_SF:
    case MULTI_ARG_3_DF:
    case MULTI_ARG_3_SF2:
    case MULTI_ARG_3_DF2:
    case MULTI_ARG_3_DI:
    case MULTI_ARG_3_SI:
    case MULTI_ARG_3_SI_DI:
    case MULTI_ARG_3_HI:
    case MULTI_ARG_3_HI_SI:
    case MULTI_ARG_3_QI:
    case MULTI_ARG_3_DI2:
    case MULTI_ARG_3_SI2:
    case MULTI_ARG_3_HI2:
    case MULTI_ARG_3_QI2:
      nargs = 3;
      break;

    case MULTI_ARG_2_SF:
    case MULTI_ARG_2_DF:
    case MULTI_ARG_2_DI:
    case MULTI_ARG_2_SI:
    case MULTI_ARG_2_HI:
    case MULTI_ARG_2_QI:
      nargs = 2;
      break;

    case MULTI_ARG_2_DI_IMM:
    case MULTI_ARG_2_SI_IMM:
    case MULTI_ARG_2_HI_IMM:
    case MULTI_ARG_2_QI_IMM:
      nargs = 2;
      last_arg_constant = true;
      break;

    case MULTI_ARG_1_SF:
    case MULTI_ARG_1_DF:
    case MULTI_ARG_1_SF2:
    case MULTI_ARG_1_DF2:
    case MULTI_ARG_1_DI:
    case MULTI_ARG_1_SI:
    case MULTI_ARG_1_HI:
    case MULTI_ARG_1_QI:
    case MULTI_ARG_1_SI_DI:
    case MULTI_ARG_1_HI_DI:
    case MULTI_ARG_1_HI_SI:
    case MULTI_ARG_1_QI_DI:
    case MULTI_ARG_1_QI_SI:
    case MULTI_ARG_1_QI_HI:
      nargs = 1;
      break;

    case MULTI_ARG_2_DI_CMP:
    case MULTI_ARG_2_SI_CMP:
    case MULTI_ARG_2_HI_CMP:
    case MULTI_ARG_2_QI_CMP:
      nargs = 2;
      comparison_p = true;
      break;

    case MULTI_ARG_2_SF_TF:
    case MULTI_ARG_2_DF_TF:
    case MULTI_ARG_2_DI_TF:
    case MULTI_ARG_2_SI_TF:
    case MULTI_ARG_2_HI_TF:
    case MULTI_ARG_2_QI_TF:
      nargs = 2;
      tf_p = true;
      break;

    default:
      gcc_unreachable ();
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert (nargs <= 4);

  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (exp, i);
      rtx op = expand_normal (arg);
      int adjust = (comparison_p) ? 1 : 0;
      enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;

      if (last_arg_constant && i == nargs - 1)
	{
	  if (!insn_data[icode].operand[i + 1].predicate (op, mode))
	    {
	      enum insn_code new_icode = icode;
	      switch (icode)
		{
		case CODE_FOR_xop_vpermil2v2df3:
		case CODE_FOR_xop_vpermil2v4sf3:
		case CODE_FOR_xop_vpermil2v4df3:
		case CODE_FOR_xop_vpermil2v8sf3:
		  error ("the last argument must be a 2-bit immediate");
		  return gen_reg_rtx (tmode);
		case CODE_FOR_xop_rotlv2di3:
		  new_icode = CODE_FOR_rotlv2di3;
		  goto xop_rotl;
		case CODE_FOR_xop_rotlv4si3:
		  new_icode = CODE_FOR_rotlv4si3;
		  goto xop_rotl;
		case CODE_FOR_xop_rotlv8hi3:
		  new_icode = CODE_FOR_rotlv8hi3;
		  goto xop_rotl;
		case CODE_FOR_xop_rotlv16qi3:
		  new_icode = CODE_FOR_rotlv16qi3;
		xop_rotl:
		  if (CONST_INT_P (op))
		    {
		      int mask = GET_MODE_BITSIZE (GET_MODE_INNER (tmode)) - 1;
		      op = GEN_INT (INTVAL (op) & mask);
		      gcc_checking_assert
			(insn_data[icode].operand[i + 1].predicate (op, mode));
		    }
		  else
		    {
		      gcc_checking_assert
			(nargs == 2
			 && insn_data[new_icode].operand[0].mode == tmode
			 && insn_data[new_icode].operand[1].mode == tmode
			 && insn_data[new_icode].operand[2].mode == mode
			 && insn_data[new_icode].operand[0].predicate
			    == insn_data[icode].operand[0].predicate
			 && insn_data[new_icode].operand[1].predicate
			    == insn_data[icode].operand[1].predicate);
		      icode = new_icode;
		      goto non_constant;
		    }
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	}
      else
	{
	non_constant:
	  if (VECTOR_MODE_P (mode))
	    op = safe_vector_operand (op, mode);

	  /* If we aren't optimizing, only allow one memory operand to be
	     generated.  */
	  if (memory_operand (op, mode))
	    num_memory++;

	  gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);

	  if (optimize
	      || !insn_data[icode].operand[i+adjust+1].predicate (op, mode)
	      || num_memory > 1)
	    op = force_reg (mode, op);
	}

      args[i].op = op;
      args[i].mode = mode;
    }

  switch (nargs)
    {
    case 1:
      pat = GEN_FCN (icode) (target, args[0].op);
      break;

    case 2:
      if (tf_p)
	pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
			       GEN_INT ((int)sub_code));
      else if (! comparison_p)
	pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
      else
	{
	  rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
				       args[0].op,
				       args[1].op);

	  pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
	}
      break;

    case 3:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
      break;

    case 4:
      pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
			     args[2].op, args[3].op);
      break;

    default:
      gcc_unreachable ();
    }

  if (! pat)
    return 0;

  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_args_builtin to take care of scalar unop
   insns with vec_merge.  */

static rtx
ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
				    rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op1, op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = op0;
  if (!insn_data[icode].operand[2].predicate (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of comparison insns.  */

static rtx
ix86_expand_sse_compare (const struct builtin_description *d,
			 tree exp, rtx target, bool swap)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2;
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (swap)
    {
      rtx tmp = gen_reg_rtx (mode1);
      emit_move_insn (tmp, op1);
      op1 = op0;
      op0 = tmp;
    }

  if (optimize || !target
      || GET_MODE (target) != tmode
      || !insn_data[d->icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[2].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
  pat = GEN_FCN (d->icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Subroutine of ix86_expand_builtin to take care of comi insns.  */

static rtx
ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
		      rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
  if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
    {
      rtx tmp = op1;
      op1 = op0;
      op0 = tmp;
    }

  target = gen_reg_rtx (SImode);
  emit_move_insn (target, const0_rtx);
  target = gen_rtx_SUBREG (QImode, target, 0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[1].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (d->icode) (op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  emit_insn (gen_rtx_SET (VOIDmode,
			  gen_rtx_STRICT_LOW_PART (VOIDmode, target),
			  gen_rtx_fmt_ee (comparison, QImode,
					  SET_DEST (pat),
					  const0_rtx)));

  return SUBREG_REG (target);
}
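/* For illustration (a sketch, not from the original sources): the comi
   insn emitted above only sets the flags register (SET_DEST of its
   pattern); the follow-up STRICT_LOW_PART store materializes the
   predicate as the low byte of a zeroed SImode pseudo, roughly

     target = 0;
     lowbyte (target) = comparison (flags) ? 1 : 0;   // a setcc

   and SUBREG_REG hands the full SImode pseudo back as the int result
   of the builtin.  */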
/* Subroutine of ix86_expand_args_builtin to take care of round insns.  */

static rtx
ix86_expand_sse_round (const struct builtin_description *d, tree exp,
		       rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op1, op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;

  if (optimize || target == 0
      || GET_MODE (target) != tmode
      || !insn_data[d->icode].operand[0].predicate (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = GEN_INT (d->comparison);

  pat = GEN_FCN (d->icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
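/* For illustration (a sketch, not from the original sources): for the
   *_ROUND builtins the builtin_description's comparison slot is
   overloaded to carry the rounding-mode immediate (ROUND_FLOOR,
   ROUND_CEIL, ...) rather than an rtx comparison code, which is why it
   is wrapped in GEN_INT above and fed to the insn as a plain constant
   operand.  */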
/* Subroutine of ix86_expand_builtin to take care of ptest insns.  */

static rtx
ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
		       rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
  enum rtx_code comparison = d->comparison;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  target = gen_reg_rtx (SImode);
  emit_move_insn (target, const0_rtx);
  target = gen_rtx_SUBREG (QImode, target, 0);

  if ((optimize && !register_operand (op0, mode0))
      || !insn_data[d->icode].operand[0].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if ((optimize && !register_operand (op1, mode1))
      || !insn_data[d->icode].operand[1].predicate (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (d->icode) (op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  emit_insn (gen_rtx_SET (VOIDmode,
			  gen_rtx_STRICT_LOW_PART (VOIDmode, target),
			  gen_rtx_fmt_ee (comparison, QImode,
					  SET_DEST (pat),
					  const0_rtx)));

  return SUBREG_REG (target);
}
/* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns.  */

static rtx
ix86_expand_sse_pcmpestr (const struct builtin_description *d,
			  tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  tree arg4 = CALL_EXPR_ARG (exp, 4);
  rtx scratch0, scratch1;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  rtx op4 = expand_normal (arg4);
  enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;

  tmode0 = insn_data[d->icode].operand[0].mode;
  tmode1 = insn_data[d->icode].operand[1].mode;
  modev2 = insn_data[d->icode].operand[2].mode;
  modei3 = insn_data[d->icode].operand[3].mode;
  modev4 = insn_data[d->icode].operand[4].mode;
  modei5 = insn_data[d->icode].operand[5].mode;
  modeimm = insn_data[d->icode].operand[6].mode;

  if (VECTOR_MODE_P (modev2))
    op0 = safe_vector_operand (op0, modev2);
  if (VECTOR_MODE_P (modev4))
    op2 = safe_vector_operand (op2, modev4);

  if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
    op0 = copy_to_mode_reg (modev2, op0);
  if (!insn_data[d->icode].operand[3].predicate (op1, modei3))
    op1 = copy_to_mode_reg (modei3, op1);
  if ((optimize && !register_operand (op2, modev4))
      || !insn_data[d->icode].operand[4].predicate (op2, modev4))
    op2 = copy_to_mode_reg (modev4, op2);
  if (!insn_data[d->icode].operand[5].predicate (op3, modei5))
    op3 = copy_to_mode_reg (modei5, op3);

  if (!insn_data[d->icode].operand[6].predicate (op4, modeimm))
    {
      error ("the fifth argument must be an 8-bit immediate");
      return const0_rtx;
    }

  if (d->code == IX86_BUILTIN_PCMPESTRI128)
    {
      if (optimize || !target
	  || GET_MODE (target) != tmode0
	  || !insn_data[d->icode].operand[0].predicate (target, tmode0))
	target = gen_reg_rtx (tmode0);

      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
    }
  else if (d->code == IX86_BUILTIN_PCMPESTRM128)
    {
      if (optimize || !target
	  || GET_MODE (target) != tmode1
	  || !insn_data[d->icode].operand[1].predicate (target, tmode1))
	target = gen_reg_rtx (tmode1);

      scratch0 = gen_reg_rtx (tmode0);

      pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
    }
  else
    {
      gcc_assert (d->flag);

      scratch0 = gen_reg_rtx (tmode0);
      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
    }

  if (! pat)
    return 0;

  emit_insn (pat);

  if (d->flag)
    {
      target = gen_reg_rtx (SImode);
      emit_move_insn (target, const0_rtx);
      target = gen_rtx_SUBREG (QImode, target, 0);

      emit_insn
	(gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
		      gen_rtx_fmt_ee (EQ, QImode,
				      gen_rtx_REG ((enum machine_mode) d->flag,
						   FLAGS_REG),
				      const0_rtx)));
      return SUBREG_REG (target);
    }
  else
    return target;
}
/* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns.  */

static rtx
ix86_expand_sse_pcmpistr (const struct builtin_description *d,
			  tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx scratch0, scratch1;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;

  tmode0 = insn_data[d->icode].operand[0].mode;
  tmode1 = insn_data[d->icode].operand[1].mode;
  modev2 = insn_data[d->icode].operand[2].mode;
  modev3 = insn_data[d->icode].operand[3].mode;
  modeimm = insn_data[d->icode].operand[4].mode;

  if (VECTOR_MODE_P (modev2))
    op0 = safe_vector_operand (op0, modev2);
  if (VECTOR_MODE_P (modev3))
    op1 = safe_vector_operand (op1, modev3);

  if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
    op0 = copy_to_mode_reg (modev2, op0);
  if ((optimize && !register_operand (op1, modev3))
      || !insn_data[d->icode].operand[3].predicate (op1, modev3))
    op1 = copy_to_mode_reg (modev3, op1);

  if (!insn_data[d->icode].operand[4].predicate (op2, modeimm))
    {
      error ("the third argument must be an 8-bit immediate");
      return const0_rtx;
    }

  if (d->code == IX86_BUILTIN_PCMPISTRI128)
    {
      if (optimize || !target
	  || GET_MODE (target) != tmode0
	  || !insn_data[d->icode].operand[0].predicate (target, tmode0))
	target = gen_reg_rtx (tmode0);

      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
    }
  else if (d->code == IX86_BUILTIN_PCMPISTRM128)
    {
      if (optimize || !target
	  || GET_MODE (target) != tmode1
	  || !insn_data[d->icode].operand[1].predicate (target, tmode1))
	target = gen_reg_rtx (tmode1);

      scratch0 = gen_reg_rtx (tmode0);

      pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
    }
  else
    {
      gcc_assert (d->flag);

      scratch0 = gen_reg_rtx (tmode0);
      scratch1 = gen_reg_rtx (tmode1);

      pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
    }

  if (! pat)
    return 0;

  emit_insn (pat);

  if (d->flag)
    {
      target = gen_reg_rtx (SImode);
      emit_move_insn (target, const0_rtx);
      target = gen_rtx_SUBREG (QImode, target, 0);

      emit_insn
	(gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
		      gen_rtx_fmt_ee (EQ, QImode,
				      gen_rtx_REG ((enum machine_mode) d->flag,
						   FLAGS_REG),
				      const0_rtx)));
      return SUBREG_REG (target);
    }
  else
    return target;
}
28133 /* Subroutine of ix86_expand_builtin to take care of insns with
28134 variable number of operands. */
28137 ix86_expand_args_builtin (const struct builtin_description *d,
28138 tree exp, rtx target)
28140 rtx pat, real_target;
28141 unsigned int i, nargs;
28142 unsigned int nargs_constant = 0;
28143 int num_memory = 0;
28147 enum machine_mode mode;
28149 bool last_arg_count = false;
28150 enum insn_code icode = d->icode;
28151 const struct insn_data_d *insn_p = &insn_data[icode];
28152 enum machine_mode tmode = insn_p->operand[0].mode;
28153 enum machine_mode rmode = VOIDmode;
28155 enum rtx_code comparison = d->comparison;
28157 switch ((enum ix86_builtin_func_type) d->flag)
28159 case V2DF_FTYPE_V2DF_ROUND:
28160 case V4DF_FTYPE_V4DF_ROUND:
28161 case V4SF_FTYPE_V4SF_ROUND:
28162 case V8SF_FTYPE_V8SF_ROUND:
28163 return ix86_expand_sse_round (d, exp, target);
28164 case INT_FTYPE_V8SF_V8SF_PTEST:
28165 case INT_FTYPE_V4DI_V4DI_PTEST:
28166 case INT_FTYPE_V4DF_V4DF_PTEST:
28167 case INT_FTYPE_V4SF_V4SF_PTEST:
28168 case INT_FTYPE_V2DI_V2DI_PTEST:
28169 case INT_FTYPE_V2DF_V2DF_PTEST:
28170 return ix86_expand_sse_ptest (d, exp, target);
28171 case FLOAT128_FTYPE_FLOAT128:
28172 case FLOAT_FTYPE_FLOAT:
28173 case INT_FTYPE_INT:
28174 case UINT64_FTYPE_INT:
28175 case UINT16_FTYPE_UINT16:
28176 case INT64_FTYPE_INT64:
28177 case INT64_FTYPE_V4SF:
28178 case INT64_FTYPE_V2DF:
28179 case INT_FTYPE_V16QI:
28180 case INT_FTYPE_V8QI:
28181 case INT_FTYPE_V8SF:
28182 case INT_FTYPE_V4DF:
28183 case INT_FTYPE_V4SF:
28184 case INT_FTYPE_V2DF:
28185 case INT_FTYPE_V32QI:
28186 case V16QI_FTYPE_V16QI:
28187 case V8SI_FTYPE_V8SF:
28188 case V8SI_FTYPE_V4SI:
28189 case V8HI_FTYPE_V8HI:
28190 case V8HI_FTYPE_V16QI:
28191 case V8QI_FTYPE_V8QI:
28192 case V8SF_FTYPE_V8SF:
28193 case V8SF_FTYPE_V8SI:
28194 case V8SF_FTYPE_V4SF:
28195 case V8SF_FTYPE_V8HI:
28196 case V4SI_FTYPE_V4SI:
28197 case V4SI_FTYPE_V16QI:
28198 case V4SI_FTYPE_V4SF:
28199 case V4SI_FTYPE_V8SI:
28200 case V4SI_FTYPE_V8HI:
28201 case V4SI_FTYPE_V4DF:
28202 case V4SI_FTYPE_V2DF:
28203 case V4HI_FTYPE_V4HI:
28204 case V4DF_FTYPE_V4DF:
28205 case V4DF_FTYPE_V4SI:
28206 case V4DF_FTYPE_V4SF:
28207 case V4DF_FTYPE_V2DF:
28208 case V4SF_FTYPE_V4SF:
28209 case V4SF_FTYPE_V4SI:
28210 case V4SF_FTYPE_V8SF:
28211 case V4SF_FTYPE_V4DF:
28212 case V4SF_FTYPE_V8HI:
28213 case V4SF_FTYPE_V2DF:
28214 case V2DI_FTYPE_V2DI:
28215 case V2DI_FTYPE_V16QI:
28216 case V2DI_FTYPE_V8HI:
28217 case V2DI_FTYPE_V4SI:
28218 case V2DF_FTYPE_V2DF:
28219 case V2DF_FTYPE_V4SI:
28220 case V2DF_FTYPE_V4DF:
28221 case V2DF_FTYPE_V4SF:
28222 case V2DF_FTYPE_V2SI:
28223 case V2SI_FTYPE_V2SI:
28224 case V2SI_FTYPE_V4SF:
28225 case V2SI_FTYPE_V2SF:
28226 case V2SI_FTYPE_V2DF:
28227 case V2SF_FTYPE_V2SF:
28228 case V2SF_FTYPE_V2SI:
28229 case V32QI_FTYPE_V32QI:
28230 case V32QI_FTYPE_V16QI:
28231 case V16HI_FTYPE_V16HI:
28232 case V16HI_FTYPE_V8HI:
28233 case V8SI_FTYPE_V8SI:
28234 case V16HI_FTYPE_V16QI:
28235 case V8SI_FTYPE_V16QI:
28236 case V4DI_FTYPE_V16QI:
28237 case V8SI_FTYPE_V8HI:
28238 case V4DI_FTYPE_V8HI:
28239 case V4DI_FTYPE_V4SI:
28240 case V4DI_FTYPE_V2DI:
28243 case V4SF_FTYPE_V4SF_VEC_MERGE:
28244 case V2DF_FTYPE_V2DF_VEC_MERGE:
28245 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
28246 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
28247 case V16QI_FTYPE_V16QI_V16QI:
28248 case V16QI_FTYPE_V8HI_V8HI:
28249 case V8QI_FTYPE_V8QI_V8QI:
28250 case V8QI_FTYPE_V4HI_V4HI:
28251 case V8HI_FTYPE_V8HI_V8HI:
28252 case V8HI_FTYPE_V16QI_V16QI:
28253 case V8HI_FTYPE_V4SI_V4SI:
28254 case V8SF_FTYPE_V8SF_V8SF:
28255 case V8SF_FTYPE_V8SF_V8SI:
28256 case V4SI_FTYPE_V4SI_V4SI:
28257 case V4SI_FTYPE_V8HI_V8HI:
28258 case V4SI_FTYPE_V4SF_V4SF:
28259 case V4SI_FTYPE_V2DF_V2DF:
28260 case V4HI_FTYPE_V4HI_V4HI:
28261 case V4HI_FTYPE_V8QI_V8QI:
28262 case V4HI_FTYPE_V2SI_V2SI:
28263 case V4DF_FTYPE_V4DF_V4DF:
28264 case V4DF_FTYPE_V4DF_V4DI:
28265 case V4SF_FTYPE_V4SF_V4SF:
28266 case V4SF_FTYPE_V4SF_V4SI:
28267 case V4SF_FTYPE_V4SF_V2SI:
28268 case V4SF_FTYPE_V4SF_V2DF:
28269 case V4SF_FTYPE_V4SF_DI:
28270 case V4SF_FTYPE_V4SF_SI:
28271 case V2DI_FTYPE_V2DI_V2DI:
28272 case V2DI_FTYPE_V16QI_V16QI:
28273 case V2DI_FTYPE_V4SI_V4SI:
28274 case V2DI_FTYPE_V2DI_V16QI:
28275 case V2DI_FTYPE_V2DF_V2DF:
28276 case V2SI_FTYPE_V2SI_V2SI:
28277 case V2SI_FTYPE_V4HI_V4HI:
28278 case V2SI_FTYPE_V2SF_V2SF:
28279 case V2DF_FTYPE_V2DF_V2DF:
28280 case V2DF_FTYPE_V2DF_V4SF:
28281 case V2DF_FTYPE_V2DF_V2DI:
28282 case V2DF_FTYPE_V2DF_DI:
28283 case V2DF_FTYPE_V2DF_SI:
28284 case V2SF_FTYPE_V2SF_V2SF:
28285 case V1DI_FTYPE_V1DI_V1DI:
28286 case V1DI_FTYPE_V8QI_V8QI:
28287 case V1DI_FTYPE_V2SI_V2SI:
28288 case V32QI_FTYPE_V16HI_V16HI:
28289 case V16HI_FTYPE_V8SI_V8SI:
28290 case V32QI_FTYPE_V32QI_V32QI:
28291 case V16HI_FTYPE_V32QI_V32QI:
28292 case V16HI_FTYPE_V16HI_V16HI:
28293 case V8SI_FTYPE_V4DF_V4DF:
28294 case V8SI_FTYPE_V8SI_V8SI:
28295 case V8SI_FTYPE_V16HI_V16HI:
28296 case V4DI_FTYPE_V4DI_V4DI:
28297 case V4DI_FTYPE_V8SI_V8SI:
28298 if (comparison == UNKNOWN)
28299 return ix86_expand_binop_builtin (icode, exp, target);
28302 case V4SF_FTYPE_V4SF_V4SF_SWAP:
28303 case V2DF_FTYPE_V2DF_V2DF_SWAP:
28304 gcc_assert (comparison != UNKNOWN);
28308 case V16HI_FTYPE_V16HI_V8HI_COUNT:
28309 case V16HI_FTYPE_V16HI_SI_COUNT:
28310 case V8SI_FTYPE_V8SI_V4SI_COUNT:
28311 case V8SI_FTYPE_V8SI_SI_COUNT:
28312 case V4DI_FTYPE_V4DI_V2DI_COUNT:
28313 case V4DI_FTYPE_V4DI_INT_COUNT:
28314 case V8HI_FTYPE_V8HI_V8HI_COUNT:
28315 case V8HI_FTYPE_V8HI_SI_COUNT:
28316 case V4SI_FTYPE_V4SI_V4SI_COUNT:
28317 case V4SI_FTYPE_V4SI_SI_COUNT:
28318 case V4HI_FTYPE_V4HI_V4HI_COUNT:
28319 case V4HI_FTYPE_V4HI_SI_COUNT:
28320 case V2DI_FTYPE_V2DI_V2DI_COUNT:
28321 case V2DI_FTYPE_V2DI_SI_COUNT:
28322 case V2SI_FTYPE_V2SI_V2SI_COUNT:
28323 case V2SI_FTYPE_V2SI_SI_COUNT:
28324 case V1DI_FTYPE_V1DI_V1DI_COUNT:
28325 case V1DI_FTYPE_V1DI_SI_COUNT:
28327 last_arg_count = true;
28329 case UINT64_FTYPE_UINT64_UINT64:
28330 case UINT_FTYPE_UINT_UINT:
28331 case UINT_FTYPE_UINT_USHORT:
28332 case UINT_FTYPE_UINT_UCHAR:
28333 case UINT16_FTYPE_UINT16_INT:
28334 case UINT8_FTYPE_UINT8_INT:
28337 case V2DI_FTYPE_V2DI_INT_CONVERT:
28340 nargs_constant = 1;
28342 case V4DI_FTYPE_V4DI_INT_CONVERT:
28345 nargs_constant = 1;
28347 case V8HI_FTYPE_V8HI_INT:
28348 case V8HI_FTYPE_V8SF_INT:
28349 case V8HI_FTYPE_V4SF_INT:
28350 case V8SF_FTYPE_V8SF_INT:
28351 case V4SI_FTYPE_V4SI_INT:
28352 case V4SI_FTYPE_V8SI_INT:
28353 case V4HI_FTYPE_V4HI_INT:
28354 case V4DF_FTYPE_V4DF_INT:
28355 case V4SF_FTYPE_V4SF_INT:
28356 case V4SF_FTYPE_V8SF_INT:
28357 case V2DI_FTYPE_V2DI_INT:
28358 case V2DF_FTYPE_V2DF_INT:
28359 case V2DF_FTYPE_V4DF_INT:
28360 case V16HI_FTYPE_V16HI_INT:
28361 case V8SI_FTYPE_V8SI_INT:
28362 case V4DI_FTYPE_V4DI_INT:
28363 case V2DI_FTYPE_V4DI_INT:
28365 nargs_constant = 1;
28367 case V16QI_FTYPE_V16QI_V16QI_V16QI:
28368 case V8SF_FTYPE_V8SF_V8SF_V8SF:
28369 case V4DF_FTYPE_V4DF_V4DF_V4DF:
28370 case V4SF_FTYPE_V4SF_V4SF_V4SF:
28371 case V2DF_FTYPE_V2DF_V2DF_V2DF:
28372 case V32QI_FTYPE_V32QI_V32QI_V32QI:
28375 case V32QI_FTYPE_V32QI_V32QI_INT:
28376 case V16HI_FTYPE_V16HI_V16HI_INT:
28377 case V16QI_FTYPE_V16QI_V16QI_INT:
28378 case V4DI_FTYPE_V4DI_V4DI_INT:
28379 case V8HI_FTYPE_V8HI_V8HI_INT:
28380 case V8SI_FTYPE_V8SI_V8SI_INT:
28381 case V8SI_FTYPE_V8SI_V4SI_INT:
28382 case V8SF_FTYPE_V8SF_V8SF_INT:
28383 case V8SF_FTYPE_V8SF_V4SF_INT:
28384 case V4SI_FTYPE_V4SI_V4SI_INT:
28385 case V4DF_FTYPE_V4DF_V4DF_INT:
28386 case V4DF_FTYPE_V4DF_V2DF_INT:
28387 case V4SF_FTYPE_V4SF_V4SF_INT:
28388 case V2DI_FTYPE_V2DI_V2DI_INT:
28389 case V4DI_FTYPE_V4DI_V2DI_INT:
28390 case V2DF_FTYPE_V2DF_V2DF_INT:
28392 nargs_constant = 1;
28394 case V4DI_FTYPE_V4DI_V4DI_INT_CONVERT:
28397 nargs_constant = 1;
28399 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
28402 nargs_constant = 1;
28404 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
28407 nargs_constant = 1;
28409 case V2DI_FTYPE_V2DI_UINT_UINT:
28411 nargs_constant = 2;
28413 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
28414 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
28415 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
28416 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
28418 nargs_constant = 1;
28420 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
28422 nargs_constant = 2;
28425 gcc_unreachable ();
28428 gcc_assert (nargs <= ARRAY_SIZE (args));
28430 if (comparison != UNKNOWN)
28432 gcc_assert (nargs == 2);
28433 return ix86_expand_sse_compare (d, exp, target, swap);
28436 if (rmode == VOIDmode || rmode == tmode)
28440 || GET_MODE (target) != tmode
28441 || !insn_p->operand[0].predicate (target, tmode))
28442 target = gen_reg_rtx (tmode);
28443 real_target = target;
28447 target = gen_reg_rtx (rmode);
28448 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
28451 for (i = 0; i < nargs; i++)
28453 tree arg = CALL_EXPR_ARG (exp, i);
28454 rtx op = expand_normal (arg);
28455 enum machine_mode mode = insn_p->operand[i + 1].mode;
28456 bool match = insn_p->operand[i + 1].predicate (op, mode);
28458 if (last_arg_count && (i + 1) == nargs)
28460 /* SIMD shift insns take either an 8-bit immediate or
28461 register as count. But builtin functions take int as
28462 count. If the count doesn't match, we put it in a register. */
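/* Illustrative example (not from the source): _mm_slli_epi16 (v, c)
reaches here with the count C as an int argument. When C is a
const_int accepted by the pattern's predicate it is used directly;
otherwise the subreg/copy below moves the count into a register. */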
28465 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
28466 if (!insn_p->operand[i + 1].predicate (op, mode))
28467 op = copy_to_reg (op);
28470 else if ((nargs - i) <= nargs_constant)
28475 case CODE_FOR_avx2_inserti128:
28476 case CODE_FOR_avx2_extracti128:
28477 error ("the last argument must be a 1-bit immediate");
28480 case CODE_FOR_sse4_1_roundpd:
28481 case CODE_FOR_sse4_1_roundps:
28482 case CODE_FOR_sse4_1_roundsd:
28483 case CODE_FOR_sse4_1_roundss:
28484 case CODE_FOR_sse4_1_blendps:
28485 case CODE_FOR_avx_blendpd256:
28486 case CODE_FOR_avx_vpermilv4df:
28487 case CODE_FOR_avx_roundpd256:
28488 case CODE_FOR_avx_roundps256:
28489 error ("the last argument must be a 4-bit immediate");
28492 case CODE_FOR_sse4_1_blendpd:
28493 case CODE_FOR_avx_vpermilv2df:
28494 case CODE_FOR_xop_vpermil2v2df3:
28495 case CODE_FOR_xop_vpermil2v4sf3:
28496 case CODE_FOR_xop_vpermil2v4df3:
28497 case CODE_FOR_xop_vpermil2v8sf3:
28498 error ("the last argument must be a 2-bit immediate");
28501 case CODE_FOR_avx_vextractf128v4df:
28502 case CODE_FOR_avx_vextractf128v8sf:
28503 case CODE_FOR_avx_vextractf128v8si:
28504 case CODE_FOR_avx_vinsertf128v4df:
28505 case CODE_FOR_avx_vinsertf128v8sf:
28506 case CODE_FOR_avx_vinsertf128v8si:
28507 error ("the last argument must be a 1-bit immediate");
28510 case CODE_FOR_avx_vmcmpv2df3:
28511 case CODE_FOR_avx_vmcmpv4sf3:
28512 case CODE_FOR_avx_cmpv2df3:
28513 case CODE_FOR_avx_cmpv4sf3:
28514 case CODE_FOR_avx_cmpv4df3:
28515 case CODE_FOR_avx_cmpv8sf3:
28516 error ("the last argument must be a 5-bit immediate");
28520 switch (nargs_constant)
28523 if ((nargs - i) == nargs_constant)
28525 error ("the next to last argument must be an 8-bit immediate");
28529 error ("the last argument must be an 8-bit immediate");
28532 gcc_unreachable ();
28539 if (VECTOR_MODE_P (mode))
28540 op = safe_vector_operand (op, mode);
28542 /* If we aren't optimizing, only allow one memory operand to
28543 be generated. */
28544 if (memory_operand (op, mode))
28547 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
28549 if (optimize || !match || num_memory > 1)
28550 op = copy_to_mode_reg (mode, op);
28554 op = copy_to_reg (op);
28555 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
28560 args[i].mode = mode;
28566 pat = GEN_FCN (icode) (real_target, args[0].op);
28569 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
28572 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
28576 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
28577 args[2].op, args[3].op);
28580 gcc_unreachable ();
28590 /* Subroutine of ix86_expand_builtin to take care of special insns
28591 with variable number of operands. */
28594 ix86_expand_special_args_builtin (const struct builtin_description *d,
28595 tree exp, rtx target)
28599 unsigned int i, nargs, arg_adjust, memory;
28603 enum machine_mode mode;
28605 enum insn_code icode = d->icode;
28606 bool last_arg_constant = false;
28607 const struct insn_data_d *insn_p = &insn_data[icode];
28608 enum machine_mode tmode = insn_p->operand[0].mode;
28609 enum { load, store } klass;
28611 switch ((enum ix86_builtin_func_type) d->flag)
28613 case VOID_FTYPE_VOID:
28614 if (icode == CODE_FOR_avx_vzeroupper)
28615 target = GEN_INT (vzeroupper_intrinsic);
28616 emit_insn (GEN_FCN (icode) (target));
28618 case VOID_FTYPE_UINT64:
28619 case VOID_FTYPE_UNSIGNED:
28624 case UINT64_FTYPE_VOID:
28625 case UNSIGNED_FTYPE_VOID:
28630 case UINT64_FTYPE_PUNSIGNED:
28631 case V2DI_FTYPE_PV2DI:
28632 case V4DI_FTYPE_PV4DI:
28633 case V32QI_FTYPE_PCCHAR:
28634 case V16QI_FTYPE_PCCHAR:
28635 case V8SF_FTYPE_PCV4SF:
28636 case V8SF_FTYPE_PCFLOAT:
28637 case V4SF_FTYPE_PCFLOAT:
28638 case V4DF_FTYPE_PCV2DF:
28639 case V4DF_FTYPE_PCDOUBLE:
28640 case V2DF_FTYPE_PCDOUBLE:
28641 case VOID_FTYPE_PVOID:
28646 case VOID_FTYPE_PV2SF_V4SF:
28647 case VOID_FTYPE_PV4DI_V4DI:
28648 case VOID_FTYPE_PV2DI_V2DI:
28649 case VOID_FTYPE_PCHAR_V32QI:
28650 case VOID_FTYPE_PCHAR_V16QI:
28651 case VOID_FTYPE_PFLOAT_V8SF:
28652 case VOID_FTYPE_PFLOAT_V4SF:
28653 case VOID_FTYPE_PDOUBLE_V4DF:
28654 case VOID_FTYPE_PDOUBLE_V2DF:
28655 case VOID_FTYPE_PULONGLONG_ULONGLONG:
28656 case VOID_FTYPE_PINT_INT:
28659 /* Reserve memory operand for target. */
28660 memory = ARRAY_SIZE (args);
28662 case V4SF_FTYPE_V4SF_PCV2SF:
28663 case V2DF_FTYPE_V2DF_PCDOUBLE:
28668 case V8SF_FTYPE_PCV8SF_V8SI:
28669 case V4DF_FTYPE_PCV4DF_V4DI:
28670 case V4SF_FTYPE_PCV4SF_V4SI:
28671 case V2DF_FTYPE_PCV2DF_V2DI:
28672 case V8SI_FTYPE_PCV8SI_V8SI:
28673 case V4DI_FTYPE_PCV4DI_V4DI:
28674 case V4SI_FTYPE_PCV4SI_V4SI:
28675 case V2DI_FTYPE_PCV2DI_V2DI:
28680 case VOID_FTYPE_PV8SF_V8SI_V8SF:
28681 case VOID_FTYPE_PV4DF_V4DI_V4DF:
28682 case VOID_FTYPE_PV4SF_V4SI_V4SF:
28683 case VOID_FTYPE_PV2DF_V2DI_V2DF:
28684 case VOID_FTYPE_PV8SI_V8SI_V8SI:
28685 case VOID_FTYPE_PV4DI_V4DI_V4DI:
28686 case VOID_FTYPE_PV4SI_V4SI_V4SI:
28687 case VOID_FTYPE_PV2DI_V2DI_V2DI:
28690 /* Reserve memory operand for target. */
28691 memory = ARRAY_SIZE (args);
28693 case VOID_FTYPE_UINT_UINT_UINT:
28694 case VOID_FTYPE_UINT64_UINT_UINT:
28695 case UCHAR_FTYPE_UINT_UINT_UINT:
28696 case UCHAR_FTYPE_UINT64_UINT_UINT:
28699 memory = ARRAY_SIZE (args);
28700 last_arg_constant = true;
28703 gcc_unreachable ();
28706 gcc_assert (nargs <= ARRAY_SIZE (args));
28708 if (klass == store)
28710 arg = CALL_EXPR_ARG (exp, 0);
28711 op = expand_normal (arg);
28712 gcc_assert (target == 0);
28715 if (GET_MODE (op) != Pmode)
28716 op = convert_to_mode (Pmode, op, 1);
28717 target = gen_rtx_MEM (tmode, force_reg (Pmode, op));
28720 target = force_reg (tmode, op);
28728 || GET_MODE (target) != tmode
28729 || !insn_p->operand[0].predicate (target, tmode))
28730 target = gen_reg_rtx (tmode);
28733 for (i = 0; i < nargs; i++)
28735 enum machine_mode mode = insn_p->operand[i + 1].mode;
28738 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
28739 op = expand_normal (arg);
28740 match = insn_p->operand[i + 1].predicate (op, mode);
28742 if (last_arg_constant && (i + 1) == nargs)
28746 if (icode == CODE_FOR_lwp_lwpvalsi3
28747 || icode == CODE_FOR_lwp_lwpinssi3
28748 || icode == CODE_FOR_lwp_lwpvaldi3
28749 || icode == CODE_FOR_lwp_lwpinsdi3)
28750 error ("the last argument must be a 32-bit immediate");
28752 error ("the last argument must be an 8-bit immediate");
28760 /* This must be the memory operand. */
28761 if (GET_MODE (op) != Pmode)
28762 op = convert_to_mode (Pmode, op, 1);
28763 op = gen_rtx_MEM (mode, force_reg (Pmode, op));
28764 gcc_assert (GET_MODE (op) == mode
28765 || GET_MODE (op) == VOIDmode);
28769 /* This must be a register. */
28770 if (VECTOR_MODE_P (mode))
28771 op = safe_vector_operand (op, mode);
28773 gcc_assert (GET_MODE (op) == mode
28774 || GET_MODE (op) == VOIDmode);
28775 op = copy_to_mode_reg (mode, op);
28780 args[i].mode = mode;
28786 pat = GEN_FCN (icode) (target);
28789 pat = GEN_FCN (icode) (target, args[0].op);
28792 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
28795 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
28798 gcc_unreachable ();
28804 return klass == store ? 0 : target;
28807 /* Return the integer constant in ARG. Constrain it to be in the range
28808 of the subparts of VEC_TYPE; issue an error if not. */
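/* For instance, for a __v4sf argument TYPE_VECTOR_SUBPARTS is 4, so MAX
is 3 and any selector outside 0..3 is diagnosed below. */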
28811 get_element_number (tree vec_type, tree arg)
28813 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
28815 if (!host_integerp (arg, 1)
28816 || (elt = tree_low_cst (arg, 1), elt > max))
28818 error ("selector must be an integer constant in the range 0..%wi", max);
28825 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
28826 ix86_expand_vector_init. We DO have language-level syntax for this, in
28827 the form of (type){ init-list }. Except that since we can't place emms
28828 instructions from inside the compiler, we can't allow the use of MMX
28829 registers unless the user explicitly asks for it. So we do *not* define
28830 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
28831 we have builtins invoked by mmintrin.h that give us license to emit
28832 these sorts of instructions. */
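/* Illustrative example (paraphrasing mmintrin.h): _mm_set_pi32 is
implemented roughly as
return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
and that builtin is expanded by ix86_expand_vec_init_builtin below. */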
28835 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
28837 enum machine_mode tmode = TYPE_MODE (type);
28838 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
28839 int i, n_elt = GET_MODE_NUNITS (tmode);
28840 rtvec v = rtvec_alloc (n_elt);
28842 gcc_assert (VECTOR_MODE_P (tmode));
28843 gcc_assert (call_expr_nargs (exp) == n_elt);
28845 for (i = 0; i < n_elt; ++i)
28847 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
28848 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
28851 if (!target || !register_operand (target, tmode))
28852 target = gen_reg_rtx (tmode);
28854 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
28858 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
28859 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
28860 had a language-level syntax for referencing vector elements. */
28863 ix86_expand_vec_ext_builtin (tree exp, rtx target)
28865 enum machine_mode tmode, mode0;
28870 arg0 = CALL_EXPR_ARG (exp, 0);
28871 arg1 = CALL_EXPR_ARG (exp, 1);
28873 op0 = expand_normal (arg0);
28874 elt = get_element_number (TREE_TYPE (arg0), arg1);
28876 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
28877 mode0 = TYPE_MODE (TREE_TYPE (arg0));
28878 gcc_assert (VECTOR_MODE_P (mode0));
28880 op0 = force_reg (mode0, op0);
28882 if (optimize || !target || !register_operand (target, tmode))
28883 target = gen_reg_rtx (tmode);
28885 ix86_expand_vector_extract (true, target, op0, elt);
28890 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
28891 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
28892 a language-level syntax for referencing vector elements. */
28895 ix86_expand_vec_set_builtin (tree exp)
28897 enum machine_mode tmode, mode1;
28898 tree arg0, arg1, arg2;
28900 rtx op0, op1, target;
28902 arg0 = CALL_EXPR_ARG (exp, 0);
28903 arg1 = CALL_EXPR_ARG (exp, 1);
28904 arg2 = CALL_EXPR_ARG (exp, 2);
28906 tmode = TYPE_MODE (TREE_TYPE (arg0));
28907 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
28908 gcc_assert (VECTOR_MODE_P (tmode));
28910 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
28911 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
28912 elt = get_element_number (TREE_TYPE (arg0), arg2);
28914 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
28915 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
28917 op0 = force_reg (tmode, op0);
28918 op1 = force_reg (mode1, op1);
28920 /* OP0 is the source of these builtin functions and shouldn't be
28921 modified. Create a copy, use it and return it as target. */
28922 target = gen_reg_rtx (tmode);
28923 emit_move_insn (target, op0);
28924 ix86_expand_vector_set (true, target, op1, elt);
28929 /* Expand an expression EXP that calls a built-in function,
28930 with result going to TARGET if that's convenient
28931 (and in mode MODE if that's convenient).
28932 SUBTARGET may be used as the target for computing one of EXP's operands.
28933 IGNORE is nonzero if the value is to be ignored. */
28936 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
28937 enum machine_mode mode ATTRIBUTE_UNUSED,
28938 int ignore ATTRIBUTE_UNUSED)
28940 const struct builtin_description *d;
28942 enum insn_code icode;
28943 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
28944 tree arg0, arg1, arg2, arg3, arg4;
28945 rtx op0, op1, op2, op3, op4, pat;
28946 enum machine_mode mode0, mode1, mode2, mode3, mode4;
28947 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
28949 /* Determine whether the builtin function is available under the current ISA.
28950 Originally the builtin was not created if it wasn't applicable to the
28951 current ISA based on the command line switches. With function specific
28952 options, we need to check in the context of the function making the call
28953 whether it is supported. */
28954 if (ix86_builtins_isa[fcode].isa
28955 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
28957 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
28958 NULL, (enum fpmath_unit) 0, false);
28961 error ("%qE needs unknown isa option", fndecl);
28964 gcc_assert (opts != NULL);
28965 error ("%qE needs isa option %s", fndecl, opts);
28973 case IX86_BUILTIN_MASKMOVQ:
28974 case IX86_BUILTIN_MASKMOVDQU:
28975 icode = (fcode == IX86_BUILTIN_MASKMOVQ
28976 ? CODE_FOR_mmx_maskmovq
28977 : CODE_FOR_sse2_maskmovdqu);
28978 /* Note the arg order is different from the operand order. */
28979 arg1 = CALL_EXPR_ARG (exp, 0);
28980 arg2 = CALL_EXPR_ARG (exp, 1);
28981 arg0 = CALL_EXPR_ARG (exp, 2);
28982 op0 = expand_normal (arg0);
28983 op1 = expand_normal (arg1);
28984 op2 = expand_normal (arg2);
28985 mode0 = insn_data[icode].operand[0].mode;
28986 mode1 = insn_data[icode].operand[1].mode;
28987 mode2 = insn_data[icode].operand[2].mode;
28989 if (GET_MODE (op0) != Pmode)
28990 op0 = convert_to_mode (Pmode, op0, 1);
28991 op0 = gen_rtx_MEM (mode1, force_reg (Pmode, op0));
28993 if (!insn_data[icode].operand[0].predicate (op0, mode0))
28994 op0 = copy_to_mode_reg (mode0, op0);
28995 if (!insn_data[icode].operand[1].predicate (op1, mode1))
28996 op1 = copy_to_mode_reg (mode1, op1);
28997 if (!insn_data[icode].operand[2].predicate (op2, mode2))
28998 op2 = copy_to_mode_reg (mode2, op2);
28999 pat = GEN_FCN (icode) (op0, op1, op2);
29005 case IX86_BUILTIN_LDMXCSR:
29006 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
29007 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
29008 emit_move_insn (target, op0);
29009 emit_insn (gen_sse_ldmxcsr (target));
29012 case IX86_BUILTIN_STMXCSR:
29013 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
29014 emit_insn (gen_sse_stmxcsr (target));
29015 return copy_to_mode_reg (SImode, target);
29017 case IX86_BUILTIN_CLFLUSH:
29018 arg0 = CALL_EXPR_ARG (exp, 0);
29019 op0 = expand_normal (arg0);
29020 icode = CODE_FOR_sse2_clflush;
29021 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
29023 if (GET_MODE (op0) != Pmode)
29024 op0 = convert_to_mode (Pmode, op0, 1);
29025 op0 = force_reg (Pmode, op0);
29028 emit_insn (gen_sse2_clflush (op0));
29031 case IX86_BUILTIN_MONITOR:
29032 arg0 = CALL_EXPR_ARG (exp, 0);
29033 arg1 = CALL_EXPR_ARG (exp, 1);
29034 arg2 = CALL_EXPR_ARG (exp, 2);
29035 op0 = expand_normal (arg0);
29036 op1 = expand_normal (arg1);
29037 op2 = expand_normal (arg2);
29040 if (GET_MODE (op0) != Pmode)
29041 op0 = convert_to_mode (Pmode, op0, 1);
29042 op0 = force_reg (Pmode, op0);
29045 op1 = copy_to_mode_reg (SImode, op1);
29047 op2 = copy_to_mode_reg (SImode, op2);
29048 emit_insn (ix86_gen_monitor (op0, op1, op2));
29051 case IX86_BUILTIN_MWAIT:
29052 arg0 = CALL_EXPR_ARG (exp, 0);
29053 arg1 = CALL_EXPR_ARG (exp, 1);
29054 op0 = expand_normal (arg0);
29055 op1 = expand_normal (arg1);
29057 op0 = copy_to_mode_reg (SImode, op0);
29059 op1 = copy_to_mode_reg (SImode, op1);
29060 emit_insn (gen_sse3_mwait (op0, op1));
29063 case IX86_BUILTIN_VEC_INIT_V2SI:
29064 case IX86_BUILTIN_VEC_INIT_V4HI:
29065 case IX86_BUILTIN_VEC_INIT_V8QI:
29066 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
29068 case IX86_BUILTIN_VEC_EXT_V2DF:
29069 case IX86_BUILTIN_VEC_EXT_V2DI:
29070 case IX86_BUILTIN_VEC_EXT_V4SF:
29071 case IX86_BUILTIN_VEC_EXT_V4SI:
29072 case IX86_BUILTIN_VEC_EXT_V8HI:
29073 case IX86_BUILTIN_VEC_EXT_V2SI:
29074 case IX86_BUILTIN_VEC_EXT_V4HI:
29075 case IX86_BUILTIN_VEC_EXT_V16QI:
29076 return ix86_expand_vec_ext_builtin (exp, target);
29078 case IX86_BUILTIN_VEC_SET_V2DI:
29079 case IX86_BUILTIN_VEC_SET_V4SF:
29080 case IX86_BUILTIN_VEC_SET_V4SI:
29081 case IX86_BUILTIN_VEC_SET_V8HI:
29082 case IX86_BUILTIN_VEC_SET_V4HI:
29083 case IX86_BUILTIN_VEC_SET_V16QI:
29084 return ix86_expand_vec_set_builtin (exp);
29086 case IX86_BUILTIN_INFQ:
29087 case IX86_BUILTIN_HUGE_VALQ:
29089 REAL_VALUE_TYPE inf;
29093 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
29095 tmp = validize_mem (force_const_mem (mode, tmp));
29098 target = gen_reg_rtx (mode);
29100 emit_move_insn (target, tmp);
29104 case IX86_BUILTIN_LLWPCB:
29105 arg0 = CALL_EXPR_ARG (exp, 0);
29106 op0 = expand_normal (arg0);
29107 icode = CODE_FOR_lwp_llwpcb;
29108 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
29110 if (GET_MODE (op0) != Pmode)
29111 op0 = convert_to_mode (Pmode, op0, 1);
29112 op0 = force_reg (Pmode, op0);
29114 emit_insn (gen_lwp_llwpcb (op0));
29117 case IX86_BUILTIN_SLWPCB:
29118 icode = CODE_FOR_lwp_slwpcb;
29120 || !insn_data[icode].operand[0].predicate (target, Pmode))
29121 target = gen_reg_rtx (Pmode);
29122 emit_insn (gen_lwp_slwpcb (target));
29125 case IX86_BUILTIN_BEXTRI32:
29126 case IX86_BUILTIN_BEXTRI64:
29127 arg0 = CALL_EXPR_ARG (exp, 0);
29128 arg1 = CALL_EXPR_ARG (exp, 1);
29129 op0 = expand_normal (arg0);
29130 op1 = expand_normal (arg1);
29131 icode = (fcode == IX86_BUILTIN_BEXTRI32
29132 ? CODE_FOR_tbm_bextri_si
29133 : CODE_FOR_tbm_bextri_di);
29134 if (!CONST_INT_P (op1))
29136 error ("last argument must be an immediate");
29141 unsigned char length = (INTVAL (op1) >> 8) & 0xFF;
29142 unsigned char lsb_index = INTVAL (op1) & 0xFF;
29143 op1 = GEN_INT (length);
29144 op2 = GEN_INT (lsb_index);
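/* Illustrative example: an immediate of 0x0804 encodes length 8
(bits 15:8) and lsb_index 4 (bits 7:0), i.e. it extracts bits
[11:4] of the source operand. */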
29145 pat = GEN_FCN (icode) (target, op0, op1, op2);
29151 case IX86_BUILTIN_RDRAND16_STEP:
29152 icode = CODE_FOR_rdrandhi_1;
29156 case IX86_BUILTIN_RDRAND32_STEP:
29157 icode = CODE_FOR_rdrandsi_1;
29161 case IX86_BUILTIN_RDRAND64_STEP:
29162 icode = CODE_FOR_rdranddi_1;
29166 op0 = gen_reg_rtx (mode0);
29167 emit_insn (GEN_FCN (icode) (op0));
29169 arg0 = CALL_EXPR_ARG (exp, 0);
29170 op1 = expand_normal (arg0);
29171 if (!address_operand (op1, VOIDmode))
29173 op1 = convert_memory_address (Pmode, op1);
29174 op1 = copy_addr_to_reg (op1);
29176 emit_move_insn (gen_rtx_MEM (mode0, op1), op0);
29178 op1 = gen_reg_rtx (SImode);
29179 emit_move_insn (op1, CONST1_RTX (SImode));
29181 /* Emit SImode conditional move. */
29182 if (mode0 == HImode)
29184 op2 = gen_reg_rtx (SImode);
29185 emit_insn (gen_zero_extendhisi2 (op2, op0));
29187 else if (mode0 == SImode)
29190 op2 = gen_rtx_SUBREG (SImode, op0, 0);
29193 target = gen_reg_rtx (SImode);
29195 pat = gen_rtx_GEU (VOIDmode, gen_rtx_REG (CCCmode, FLAGS_REG),
29197 emit_insn (gen_rtx_SET (VOIDmode, target,
29198 gen_rtx_IF_THEN_ELSE (SImode, pat, op2, op1)));
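/* The conditional move above materializes the builtin's status result:
on success (CF set) it selects the preloaded constant 1 in OP1; on
failure it selects OP2, the RDRAND destination register, which the
ISA documents as zeroed when no random value was available. */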
29201 case IX86_BUILTIN_GATHERSIV2DF:
29202 icode = CODE_FOR_avx2_gathersiv2df;
29204 case IX86_BUILTIN_GATHERSIV4DF:
29205 icode = CODE_FOR_avx2_gathersiv4df;
29207 case IX86_BUILTIN_GATHERDIV2DF:
29208 icode = CODE_FOR_avx2_gatherdiv2df;
29210 case IX86_BUILTIN_GATHERDIV4DF:
29211 icode = CODE_FOR_avx2_gatherdiv4df;
29213 case IX86_BUILTIN_GATHERSIV4SF:
29214 icode = CODE_FOR_avx2_gathersiv4sf;
29216 case IX86_BUILTIN_GATHERSIV8SF:
29217 icode = CODE_FOR_avx2_gathersiv8sf;
29219 case IX86_BUILTIN_GATHERDIV4SF:
29220 icode = CODE_FOR_avx2_gatherdiv4sf;
29222 case IX86_BUILTIN_GATHERDIV8SF:
29223 icode = CODE_FOR_avx2_gatherdiv8sf;
29225 case IX86_BUILTIN_GATHERSIV2DI:
29226 icode = CODE_FOR_avx2_gathersiv2di;
29228 case IX86_BUILTIN_GATHERSIV4DI:
29229 icode = CODE_FOR_avx2_gathersiv4di;
29231 case IX86_BUILTIN_GATHERDIV2DI:
29232 icode = CODE_FOR_avx2_gatherdiv2di;
29234 case IX86_BUILTIN_GATHERDIV4DI:
29235 icode = CODE_FOR_avx2_gatherdiv4di;
29237 case IX86_BUILTIN_GATHERSIV4SI:
29238 icode = CODE_FOR_avx2_gathersiv4si;
29240 case IX86_BUILTIN_GATHERSIV8SI:
29241 icode = CODE_FOR_avx2_gathersiv8si;
29243 case IX86_BUILTIN_GATHERDIV4SI:
29244 icode = CODE_FOR_avx2_gatherdiv4si;
29246 case IX86_BUILTIN_GATHERDIV8SI:
29247 icode = CODE_FOR_avx2_gatherdiv8si;
29249 case IX86_BUILTIN_GATHERALTSIV4DF:
29250 icode = CODE_FOR_avx2_gathersiv4df;
29252 case IX86_BUILTIN_GATHERALTDIV8SF:
29253 icode = CODE_FOR_avx2_gatherdiv8sf;
29255 case IX86_BUILTIN_GATHERALTSIV4DI:
29256 icode = CODE_FOR_avx2_gathersiv4di;
29258 case IX86_BUILTIN_GATHERALTDIV8SI:
29259 icode = CODE_FOR_avx2_gatherdiv8si;
29263 arg0 = CALL_EXPR_ARG (exp, 0);
29264 arg1 = CALL_EXPR_ARG (exp, 1);
29265 arg2 = CALL_EXPR_ARG (exp, 2);
29266 arg3 = CALL_EXPR_ARG (exp, 3);
29267 arg4 = CALL_EXPR_ARG (exp, 4);
29268 op0 = expand_normal (arg0);
29269 op1 = expand_normal (arg1);
29270 op2 = expand_normal (arg2);
29271 op3 = expand_normal (arg3);
29272 op4 = expand_normal (arg4);
29273 /* Note the arg order is different from the operand order. */
29274 mode0 = insn_data[icode].operand[1].mode;
29275 mode2 = insn_data[icode].operand[3].mode;
29276 mode3 = insn_data[icode].operand[4].mode;
29277 mode4 = insn_data[icode].operand[5].mode;
29279 if (target == NULL_RTX
29280 || GET_MODE (target) != insn_data[icode].operand[0].mode)
29281 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
29283 subtarget = target;
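/* The GATHERALT variants pair a wider vector with a narrower gather
(e.g. a V8SI index for a four-element gather, or V8SF source and mask
operands with a V4DI index), so only the low half of the wider
operands is meaningful; the code below extracts those halves before
emitting the gather. */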
29285 if (fcode == IX86_BUILTIN_GATHERALTSIV4DF
29286 || fcode == IX86_BUILTIN_GATHERALTSIV4DI)
29288 rtx half = gen_reg_rtx (V4SImode);
29289 if (!nonimmediate_operand (op2, V8SImode))
29290 op2 = copy_to_mode_reg (V8SImode, op2);
29291 emit_insn (gen_vec_extract_lo_v8si (half, op2));
29294 else if (fcode == IX86_BUILTIN_GATHERALTDIV8SF
29295 || fcode == IX86_BUILTIN_GATHERALTDIV8SI)
29297 rtx (*gen) (rtx, rtx);
29298 rtx half = gen_reg_rtx (mode0);
29299 if (mode0 == V4SFmode)
29300 gen = gen_vec_extract_lo_v8sf;
29302 gen = gen_vec_extract_lo_v8si;
29303 if (!nonimmediate_operand (op0, GET_MODE (op0)))
29304 op0 = copy_to_mode_reg (GET_MODE (op0), op0);
29305 emit_insn (gen (half, op0));
29307 if (!nonimmediate_operand (op3, GET_MODE (op3)))
29308 op3 = copy_to_mode_reg (GET_MODE (op3), op3);
29309 emit_insn (gen (half, op3));
29313 /* Force memory operand only with base register here. But we
29314 don't want to do it on memory operands for other builtin
29315 functions. */
29316 if (GET_MODE (op1) != Pmode)
29317 op1 = convert_to_mode (Pmode, op1, 1);
29318 op1 = force_reg (Pmode, op1);
29320 if (!insn_data[icode].operand[1].predicate (op0, mode0))
29321 op0 = copy_to_mode_reg (mode0, op0);
29322 if (!insn_data[icode].operand[2].predicate (op1, Pmode))
29323 op1 = copy_to_mode_reg (Pmode, op1);
29324 if (!insn_data[icode].operand[3].predicate (op2, mode2))
29325 op2 = copy_to_mode_reg (mode2, op2);
29326 if (!insn_data[icode].operand[4].predicate (op3, mode3))
29327 op3 = copy_to_mode_reg (mode3, op3);
29328 if (!insn_data[icode].operand[5].predicate (op4, mode4))
29330 error ("last argument must be scale 1, 2, 4, 8");
29334 /* Optimize. If mask is known to have all high bits set,
29335 replace op0 with pc_rtx to signal that the instruction
29336 overwrites the whole destination and doesn't use its
29337 previous contents. */
29340 if (TREE_CODE (arg3) == VECTOR_CST)
29343 unsigned int negative = 0;
29344 for (elt = TREE_VECTOR_CST_ELTS (arg3);
29345 elt; elt = TREE_CHAIN (elt))
29347 tree cst = TREE_VALUE (elt);
29348 if (TREE_CODE (cst) == INTEGER_CST
29349 && tree_int_cst_sign_bit (cst))
29351 else if (TREE_CODE (cst) == REAL_CST
29352 && REAL_VALUE_NEGATIVE (TREE_REAL_CST (cst)))
29355 if (negative == TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg3)))
29358 else if (TREE_CODE (arg3) == SSA_NAME)
29360 /* Recognize also when mask is like:
29361 __v2df src = _mm_setzero_pd ();
29362 __v2df mask = _mm_cmpeq_pd (src, src);
29364 __v8sf src = _mm256_setzero_ps ();
29365 __v8sf mask = _mm256_cmp_ps (src, src, _CMP_EQ_OQ);
29366 as that is a cheaper way to load all ones into
29367 a register than having to load a constant from
29368 memory. */
29369 gimple def_stmt = SSA_NAME_DEF_STMT (arg3);
29370 if (is_gimple_call (def_stmt))
29372 tree fndecl = gimple_call_fndecl (def_stmt);
29374 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
29375 switch ((unsigned int) DECL_FUNCTION_CODE (fndecl))
29377 case IX86_BUILTIN_CMPPD:
29378 case IX86_BUILTIN_CMPPS:
29379 case IX86_BUILTIN_CMPPD256:
29380 case IX86_BUILTIN_CMPPS256:
29381 if (!integer_zerop (gimple_call_arg (def_stmt, 2)))
29384 case IX86_BUILTIN_CMPEQPD:
29385 case IX86_BUILTIN_CMPEQPS:
29386 if (initializer_zerop (gimple_call_arg (def_stmt, 0))
29387 && initializer_zerop (gimple_call_arg (def_stmt,
29398 pat = GEN_FCN (icode) (subtarget, op0, op1, op2, op3, op4);
29403 if (fcode == IX86_BUILTIN_GATHERDIV8SF
29404 || fcode == IX86_BUILTIN_GATHERDIV8SI)
29406 enum machine_mode tmode = GET_MODE (subtarget) == V8SFmode
29407 ? V4SFmode : V4SImode;
29408 if (target == NULL_RTX)
29409 target = gen_reg_rtx (tmode);
29410 if (tmode == V4SFmode)
29411 emit_insn (gen_vec_extract_lo_v8sf (target, subtarget));
29413 emit_insn (gen_vec_extract_lo_v8si (target, subtarget));
29416 target = subtarget;
29424 for (i = 0, d = bdesc_special_args;
29425 i < ARRAY_SIZE (bdesc_special_args);
29427 if (d->code == fcode)
29428 return ix86_expand_special_args_builtin (d, exp, target);
29430 for (i = 0, d = bdesc_args;
29431 i < ARRAY_SIZE (bdesc_args);
29433 if (d->code == fcode)
29436 case IX86_BUILTIN_FABSQ:
29437 case IX86_BUILTIN_COPYSIGNQ:
29439 /* Emit a normal call if SSE2 isn't available. */
29440 return expand_call (exp, target, ignore);
29442 return ix86_expand_args_builtin (d, exp, target);
29445 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
29446 if (d->code == fcode)
29447 return ix86_expand_sse_comi (d, exp, target);
29449 for (i = 0, d = bdesc_pcmpestr;
29450 i < ARRAY_SIZE (bdesc_pcmpestr);
29452 if (d->code == fcode)
29453 return ix86_expand_sse_pcmpestr (d, exp, target);
29455 for (i = 0, d = bdesc_pcmpistr;
29456 i < ARRAY_SIZE (bdesc_pcmpistr);
29458 if (d->code == fcode)
29459 return ix86_expand_sse_pcmpistr (d, exp, target);
29461 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
29462 if (d->code == fcode)
29463 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
29464 (enum ix86_builtin_func_type)
29465 d->flag, d->comparison);
29467 gcc_unreachable ();
29470 /* Returns a function decl for a vectorized version of the builtin function
29471 FNDECL, with result vector type TYPE_OUT and argument vector type TYPE_IN, or NULL_TREE
29472 if it is not available. */
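/* For example (taken from the cases below): a request to vectorize
BUILT_IN_SQRT with a V2DF result and V2DF argument yields the decl for
IX86_BUILTIN_SQRTPD, i.e. sqrt maps to sqrtpd. */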
29475 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
29478 enum machine_mode in_mode, out_mode;
29480 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
29482 if (TREE_CODE (type_out) != VECTOR_TYPE
29483 || TREE_CODE (type_in) != VECTOR_TYPE
29484 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
29487 out_mode = TYPE_MODE (TREE_TYPE (type_out));
29488 out_n = TYPE_VECTOR_SUBPARTS (type_out);
29489 in_mode = TYPE_MODE (TREE_TYPE (type_in));
29490 in_n = TYPE_VECTOR_SUBPARTS (type_in);
29494 case BUILT_IN_SQRT:
29495 if (out_mode == DFmode && in_mode == DFmode)
29497 if (out_n == 2 && in_n == 2)
29498 return ix86_builtins[IX86_BUILTIN_SQRTPD];
29499 else if (out_n == 4 && in_n == 4)
29500 return ix86_builtins[IX86_BUILTIN_SQRTPD256];
29504 case BUILT_IN_SQRTF:
29505 if (out_mode == SFmode && in_mode == SFmode)
29507 if (out_n == 4 && in_n == 4)
29508 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
29509 else if (out_n == 8 && in_n == 8)
29510 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR256];
29514 case BUILT_IN_IRINT:
29515 case BUILT_IN_LRINT:
29516 case BUILT_IN_LLRINT:
29517 if (out_mode == SImode && in_mode == DFmode)
29519 if (out_n == 4 && in_n == 2)
29520 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
29521 else if (out_n == 8 && in_n == 4)
29522 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX256];
29526 case BUILT_IN_IRINTF:
29527 case BUILT_IN_LRINTF:
29528 case BUILT_IN_LLRINTF:
29529 if (out_mode == SImode && in_mode == SFmode)
29531 if (out_n == 4 && in_n == 4)
29532 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
29533 else if (out_n == 8 && in_n == 8)
29534 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ256];
29538 case BUILT_IN_COPYSIGN:
29539 if (out_mode == DFmode && in_mode == DFmode)
29541 if (out_n == 2 && in_n == 2)
29542 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
29543 else if (out_n == 4 && in_n == 4)
29544 return ix86_builtins[IX86_BUILTIN_CPYSGNPD256];
29548 case BUILT_IN_COPYSIGNF:
29549 if (out_mode == SFmode && in_mode == SFmode)
29551 if (out_n == 4 && in_n == 4)
29552 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
29553 else if (out_n == 8 && in_n == 8)
29554 return ix86_builtins[IX86_BUILTIN_CPYSGNPS256];
29558 case BUILT_IN_FLOOR:
29559 /* The round insn does not trap on denormals. */
29560 if (flag_trapping_math || !TARGET_ROUND)
29563 if (out_mode == DFmode && in_mode == DFmode)
29565 if (out_n == 2 && in_n == 2)
29566 return ix86_builtins[IX86_BUILTIN_FLOORPD];
29567 else if (out_n == 4 && in_n == 4)
29568 return ix86_builtins[IX86_BUILTIN_FLOORPD256];
29572 case BUILT_IN_FLOORF:
29573 /* The round insn does not trap on denormals. */
29574 if (flag_trapping_math || !TARGET_ROUND)
29577 if (out_mode == SFmode && in_mode == SFmode)
29579 if (out_n == 4 && in_n == 4)
29580 return ix86_builtins[IX86_BUILTIN_FLOORPS];
29581 else if (out_n == 8 && in_n == 8)
29582 return ix86_builtins[IX86_BUILTIN_FLOORPS256];
29586 case BUILT_IN_CEIL:
29587 /* The round insn does not trap on denormals. */
29588 if (flag_trapping_math || !TARGET_ROUND)
29591 if (out_mode == DFmode && in_mode == DFmode)
29593 if (out_n == 2 && in_n == 2)
29594 return ix86_builtins[IX86_BUILTIN_CEILPD];
29595 else if (out_n == 4 && in_n == 4)
29596 return ix86_builtins[IX86_BUILTIN_CEILPD256];
29600 case BUILT_IN_CEILF:
29601 /* The round insn does not trap on denormals. */
29602 if (flag_trapping_math || !TARGET_ROUND)
29605 if (out_mode == SFmode && in_mode == SFmode)
29607 if (out_n == 4 && in_n == 4)
29608 return ix86_builtins[IX86_BUILTIN_CEILPS];
29609 else if (out_n == 8 && in_n == 8)
29610 return ix86_builtins[IX86_BUILTIN_CEILPS256];
29614 case BUILT_IN_TRUNC:
29615 /* The round insn does not trap on denormals. */
29616 if (flag_trapping_math || !TARGET_ROUND)
29619 if (out_mode == DFmode && in_mode == DFmode)
29621 if (out_n == 2 && in_n == 2)
29622 return ix86_builtins[IX86_BUILTIN_TRUNCPD];
29623 else if (out_n == 4 && in_n == 4)
29624 return ix86_builtins[IX86_BUILTIN_TRUNCPD256];
29628 case BUILT_IN_TRUNCF:
29629 /* The round insn does not trap on denormals. */
29630 if (flag_trapping_math || !TARGET_ROUND)
29633 if (out_mode == SFmode && in_mode == SFmode)
29635 if (out_n == 4 && in_n == 4)
29636 return ix86_builtins[IX86_BUILTIN_TRUNCPS];
29637 else if (out_n == 8 && in_n == 8)
29638 return ix86_builtins[IX86_BUILTIN_TRUNCPS256];
29642 case BUILT_IN_RINT:
29643 /* The round insn does not trap on denormals. */
29644 if (flag_trapping_math || !TARGET_ROUND)
29647 if (out_mode == DFmode && in_mode == DFmode)
29649 if (out_n == 2 && in_n == 2)
29650 return ix86_builtins[IX86_BUILTIN_RINTPD];
29651 else if (out_n == 4 && in_n == 4)
29652 return ix86_builtins[IX86_BUILTIN_RINTPD256];
29656 case BUILT_IN_RINTF:
29657 /* The round insn does not trap on denormals. */
29658 if (flag_trapping_math || !TARGET_ROUND)
29661 if (out_mode == SFmode && in_mode == SFmode)
29663 if (out_n == 4 && in_n == 4)
29664 return ix86_builtins[IX86_BUILTIN_RINTPS];
29665 else if (out_n == 8 && in_n == 8)
29666 return ix86_builtins[IX86_BUILTIN_RINTPS256];
29670 case BUILT_IN_ROUND:
29671 /* The round insn does not trap on denormals. */
29672 if (flag_trapping_math || !TARGET_ROUND)
29675 if (out_mode == DFmode && in_mode == DFmode)
29677 if (out_n == 2 && in_n == 2)
29678 return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ];
29679 else if (out_n == 4 && in_n == 4)
29680 return ix86_builtins[IX86_BUILTIN_ROUNDPD_AZ256];
29684 case BUILT_IN_ROUNDF:
29685 /* The round insn does not trap on denormals. */
29686 if (flag_trapping_math || !TARGET_ROUND)
29689 if (out_mode == SFmode && in_mode == SFmode)
29691 if (out_n == 4 && in_n == 4)
29692 return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ];
29693 else if (out_n == 8 && in_n == 8)
29694 return ix86_builtins[IX86_BUILTIN_ROUNDPS_AZ256];
29699 if (out_mode == DFmode && in_mode == DFmode)
29701 if (out_n == 2 && in_n == 2)
29702 return ix86_builtins[IX86_BUILTIN_VFMADDPD];
29703 if (out_n == 4 && in_n == 4)
29704 return ix86_builtins[IX86_BUILTIN_VFMADDPD256];
29708 case BUILT_IN_FMAF:
29709 if (out_mode == SFmode && in_mode == SFmode)
29711 if (out_n == 4 && in_n == 4)
29712 return ix86_builtins[IX86_BUILTIN_VFMADDPS];
29713 if (out_n == 8 && in_n == 8)
29714 return ix86_builtins[IX86_BUILTIN_VFMADDPS256];
29722 /* Dispatch to a handler for a vectorization library. */
29723 if (ix86_veclib_handler)
29724 return ix86_veclib_handler ((enum built_in_function) fn, type_out,
29730 /* Handler for an SVML-style interface to
29731 a library with vectorized intrinsics. */
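/* The SVML entry points are derived from the scalar builtin name; e.g.
(illustrative) BUILT_IN_SIN becomes "vmldSin2" (double, 2 lanes) and
BUILT_IN_SINF becomes "vmlsSin4" (float, 4 lanes), while log is
special-cased below as "vmldLn2"/"vmlsLn4". */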
29734 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
29737 tree fntype, new_fndecl, args;
29740 enum machine_mode el_mode, in_mode;
29743 /* The SVML is suitable for unsafe math only. */
29744 if (!flag_unsafe_math_optimizations)
29747 el_mode = TYPE_MODE (TREE_TYPE (type_out));
29748 n = TYPE_VECTOR_SUBPARTS (type_out);
29749 in_mode = TYPE_MODE (TREE_TYPE (type_in));
29750 in_n = TYPE_VECTOR_SUBPARTS (type_in);
29751 if (el_mode != in_mode
29759 case BUILT_IN_LOG10:
29761 case BUILT_IN_TANH:
29763 case BUILT_IN_ATAN:
29764 case BUILT_IN_ATAN2:
29765 case BUILT_IN_ATANH:
29766 case BUILT_IN_CBRT:
29767 case BUILT_IN_SINH:
29769 case BUILT_IN_ASINH:
29770 case BUILT_IN_ASIN:
29771 case BUILT_IN_COSH:
29773 case BUILT_IN_ACOSH:
29774 case BUILT_IN_ACOS:
29775 if (el_mode != DFmode || n != 2)
29779 case BUILT_IN_EXPF:
29780 case BUILT_IN_LOGF:
29781 case BUILT_IN_LOG10F:
29782 case BUILT_IN_POWF:
29783 case BUILT_IN_TANHF:
29784 case BUILT_IN_TANF:
29785 case BUILT_IN_ATANF:
29786 case BUILT_IN_ATAN2F:
29787 case BUILT_IN_ATANHF:
29788 case BUILT_IN_CBRTF:
29789 case BUILT_IN_SINHF:
29790 case BUILT_IN_SINF:
29791 case BUILT_IN_ASINHF:
29792 case BUILT_IN_ASINF:
29793 case BUILT_IN_COSHF:
29794 case BUILT_IN_COSF:
29795 case BUILT_IN_ACOSHF:
29796 case BUILT_IN_ACOSF:
29797 if (el_mode != SFmode || n != 4)
29805 bname = IDENTIFIER_POINTER (DECL_NAME (builtin_decl_implicit (fn)));
29807 if (fn == BUILT_IN_LOGF)
29808 strcpy (name, "vmlsLn4");
29809 else if (fn == BUILT_IN_LOG)
29810 strcpy (name, "vmldLn2");
29813 sprintf (name, "vmls%s", bname+10);
29814 name[strlen (name)-1] = '4';
29817 sprintf (name, "vmld%s2", bname+10);
29819 /* Convert to uppercase. */
29823 for (args = DECL_ARGUMENTS (builtin_decl_implicit (fn));
29825 args = TREE_CHAIN (args))
29829 fntype = build_function_type_list (type_out, type_in, NULL);
29831 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
29833 /* Build a function declaration for the vectorized function. */
29834 new_fndecl = build_decl (BUILTINS_LOCATION,
29835 FUNCTION_DECL, get_identifier (name), fntype);
29836 TREE_PUBLIC (new_fndecl) = 1;
29837 DECL_EXTERNAL (new_fndecl) = 1;
29838 DECL_IS_NOVOPS (new_fndecl) = 1;
29839 TREE_READONLY (new_fndecl) = 1;
29844 /* Handler for an ACML-style interface to
29845 a library with vectorized intrinsics. */
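/* The ACML entry points follow the "__vrd2_*"/"__vrs4_*" convention;
e.g. (illustrative) BUILT_IN_SIN maps to "__vrd2_sin" and BUILT_IN_SINF
to "__vrs4_sinf". The placeholder dots in NAME below are overwritten
with the element kind and lane count. */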
29848 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
29850 char name[20] = "__vr.._";
29851 tree fntype, new_fndecl, args;
29854 enum machine_mode el_mode, in_mode;
29857 /* The ACML is 64-bit only and suitable for unsafe math only, as
29858 it does not correctly support parts of IEEE (such as denormals)
29859 with the required precision. */
29861 || !flag_unsafe_math_optimizations)
29864 el_mode = TYPE_MODE (TREE_TYPE (type_out));
29865 n = TYPE_VECTOR_SUBPARTS (type_out);
29866 in_mode = TYPE_MODE (TREE_TYPE (type_in));
29867 in_n = TYPE_VECTOR_SUBPARTS (type_in);
29868 if (el_mode != in_mode
29878 case BUILT_IN_LOG2:
29879 case BUILT_IN_LOG10:
29882 if (el_mode != DFmode
29887 case BUILT_IN_SINF:
29888 case BUILT_IN_COSF:
29889 case BUILT_IN_EXPF:
29890 case BUILT_IN_POWF:
29891 case BUILT_IN_LOGF:
29892 case BUILT_IN_LOG2F:
29893 case BUILT_IN_LOG10F:
29896 if (el_mode != SFmode
29905 bname = IDENTIFIER_POINTER (DECL_NAME (builtin_decl_implicit (fn)));
29906 sprintf (name + 7, "%s", bname+10);
29909 for (args = DECL_ARGUMENTS (builtin_decl_implicit (fn));
29911 args = TREE_CHAIN (args))
29915 fntype = build_function_type_list (type_out, type_in, NULL);
29917 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
29919 /* Build a function declaration for the vectorized function. */
29920 new_fndecl = build_decl (BUILTINS_LOCATION,
29921 FUNCTION_DECL, get_identifier (name), fntype);
29922 TREE_PUBLIC (new_fndecl) = 1;
29923 DECL_EXTERNAL (new_fndecl) = 1;
29924 DECL_IS_NOVOPS (new_fndecl) = 1;
29925 TREE_READONLY (new_fndecl) = 1;
29930 /* Returns a decl of a function that implements gather load with
29931 memory type MEM_VECTYPE, index type INDEX_TYPE and scale SCALE.
29932 Return NULL_TREE if it is not available. */
29935 ix86_vectorize_builtin_gather (const_tree mem_vectype,
29936 const_tree index_type, int scale)
29939 enum ix86_builtins code;
29944 if ((TREE_CODE (index_type) != INTEGER_TYPE
29945 && !POINTER_TYPE_P (index_type))
29946 || (TYPE_MODE (index_type) != SImode
29947 && TYPE_MODE (index_type) != DImode))
29950 if (TYPE_PRECISION (index_type) > POINTER_SIZE)
29953 /* v*gather* insns sign-extend the index to pointer mode. */
29954 if (TYPE_PRECISION (index_type) < POINTER_SIZE
29955 && TYPE_UNSIGNED (index_type))
29960 || (scale & (scale - 1)) != 0)
29963 si = TYPE_MODE (index_type) == SImode;
29964 switch (TYPE_MODE (mem_vectype))
29967 code = si ? IX86_BUILTIN_GATHERSIV2DF : IX86_BUILTIN_GATHERDIV2DF;
29970 code = si ? IX86_BUILTIN_GATHERALTSIV4DF : IX86_BUILTIN_GATHERDIV4DF;
29973 code = si ? IX86_BUILTIN_GATHERSIV2DI : IX86_BUILTIN_GATHERDIV2DI;
29976 code = si ? IX86_BUILTIN_GATHERALTSIV4DI : IX86_BUILTIN_GATHERDIV4DI;
29979 code = si ? IX86_BUILTIN_GATHERSIV4SF : IX86_BUILTIN_GATHERDIV4SF;
29982 code = si ? IX86_BUILTIN_GATHERSIV8SF : IX86_BUILTIN_GATHERALTDIV8SF;
29985 code = si ? IX86_BUILTIN_GATHERSIV4SI : IX86_BUILTIN_GATHERDIV4SI;
29988 code = si ? IX86_BUILTIN_GATHERSIV8SI : IX86_BUILTIN_GATHERALTDIV8SI;
29994 return ix86_builtins[code];
29997 /* Returns a decl for a target-specific builtin that implements the
29998 reciprocal of the function, or NULL_TREE if not available. */
30001 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
30002 bool sqrt ATTRIBUTE_UNUSED)
30004 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
30005 && flag_finite_math_only && !flag_trapping_math
30006 && flag_unsafe_math_optimizations))
30010 /* Machine dependent builtins. */
30013 /* Vectorized version of sqrt to rsqrt conversion. */
30014 case IX86_BUILTIN_SQRTPS_NR:
30015 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
30017 case IX86_BUILTIN_SQRTPS_NR256:
30018 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR256];
30024 /* Normal builtins. */
30027 /* Sqrt to rsqrt conversion. */
30028 case BUILT_IN_SQRTF:
30029 return ix86_builtins[IX86_BUILTIN_RSQRTF];
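/* The _NR ("Newton-Raphson") variants above pair the rsqrt
approximation with a refinement step; the substitution is only valid
under the finite/unsafe-math conditions checked at the top of this
function. */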
30036 /* Helper for avx_vpermilps256_operand et al. This is also used by
30037 the expansion functions to turn the parallel back into a mask.
30038 The return value is 0 for no match and the imm8+1 for a match. */
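/* For instance (illustrative), a V4SF parallel [3 2 1 0] packs two bits
per element: mask = 3 | 2<<2 | 1<<4 | 0<<6 = 0x1b, and 0x1c (0x1b + 1)
is returned. */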
30041 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
30043 unsigned i, nelt = GET_MODE_NUNITS (mode);
30045 unsigned char ipar[8];
30047 if (XVECLEN (par, 0) != (int) nelt)
30050 /* Validate that all of the elements are constants, and not totally
30051 out of range. Copy the data into an integral array to make the
30052 subsequent checks easier. */
30053 for (i = 0; i < nelt; ++i)
30055 rtx er = XVECEXP (par, 0, i);
30056 unsigned HOST_WIDE_INT ei;
30058 if (!CONST_INT_P (er))
30069 /* In the 256-bit DFmode case, we can only move elements within
30070 a 128-bit lane. */
30071 for (i = 0; i < 2; ++i)
30075 mask |= ipar[i] << i;
30077 for (i = 2; i < 4; ++i)
30081 mask |= (ipar[i] - 2) << i;
30086 /* In the 256-bit SFmode case, we have full freedom of movement
30087 within the low 128-bit lane, but the high 128-bit lane must
30088 mirror the exact same pattern. */
30089 for (i = 0; i < 4; ++i)
30090 if (ipar[i] + 4 != ipar[i + 4])
30097 /* In the 128-bit case, we have full freedom in the placement of
30098 the elements from the source operand. */
30099 for (i = 0; i < nelt; ++i)
30100 mask |= ipar[i] << (i * (nelt / 2));
30104 gcc_unreachable ();
30107 /* Make sure success has a non-zero value by adding one. */
30111 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
30112 the expansion functions to turn the parallel back into a mask.
30113 The return value is 0 for no match and the imm8+1 for a match. */
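/* For instance (illustrative), a V4DF parallel [0 1 4 5] selects the
low half of the first source and the low half of the second; the
rebuilt imm8 is 0x20, returned as 0x21. */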
30116 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
30118 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
30120 unsigned char ipar[8];
30122 if (XVECLEN (par, 0) != (int) nelt)
30125 /* Validate that all of the elements are constants, and not totally
30126 out of range. Copy the data into an integral array to make the
30127 subsequent checks easier. */
30128 for (i = 0; i < nelt; ++i)
30130 rtx er = XVECEXP (par, 0, i);
30131 unsigned HOST_WIDE_INT ei;
30133 if (!CONST_INT_P (er))
30136 if (ei >= 2 * nelt)
30141 /* Validate that each half of the permute selects consecutive elements. */
30142 for (i = 0; i < nelt2 - 1; ++i)
30143 if (ipar[i] + 1 != ipar[i + 1])
30145 for (i = nelt2; i < nelt - 1; ++i)
30146 if (ipar[i] + 1 != ipar[i + 1])
30149 /* Reconstruct the mask. */
30150 for (i = 0; i < 2; ++i)
30152 unsigned e = ipar[i * nelt2];
30156 mask |= e << (i * 4);
30159 /* Make sure success has a non-zero value by adding one. */
30163 /* Store OPERAND to memory after reload has completed. This means
30164 that we can't easily use assign_stack_local. */
30166 ix86_force_to_memory (enum machine_mode mode, rtx operand)
30170 gcc_assert (reload_completed);
30171 if (ix86_using_red_zone ())
30173 result = gen_rtx_MEM (mode,
30174 gen_rtx_PLUS (Pmode,
30176 GEN_INT (-RED_ZONE_SIZE)));
30177 emit_move_insn (result, operand);
30179 else if (TARGET_64BIT)
30185 operand = gen_lowpart (DImode, operand);
30189 gen_rtx_SET (VOIDmode,
30190 gen_rtx_MEM (DImode,
30191 gen_rtx_PRE_DEC (DImode,
30192 stack_pointer_rtx)),
30196 gcc_unreachable ();
30198 result = gen_rtx_MEM (mode, stack_pointer_rtx);
30207 split_double_mode (mode, &operand, 1, operands, operands + 1);
30209 gen_rtx_SET (VOIDmode,
30210 gen_rtx_MEM (SImode,
30211 gen_rtx_PRE_DEC (Pmode,
30212 stack_pointer_rtx)),
30215 gen_rtx_SET (VOIDmode,
30216 gen_rtx_MEM (SImode,
30217 gen_rtx_PRE_DEC (Pmode,
30218 stack_pointer_rtx)),
30223 /* Store HImodes as SImodes. */
30224 operand = gen_lowpart (SImode, operand);
30228 gen_rtx_SET (VOIDmode,
30229 gen_rtx_MEM (GET_MODE (operand),
30230 gen_rtx_PRE_DEC (SImode,
30231 stack_pointer_rtx)),
30235 gcc_unreachable ();
30237 result = gen_rtx_MEM (mode, stack_pointer_rtx);
30242 /* Free the operand from memory. */
30244 ix86_free_from_memory (enum machine_mode mode)
30246 if (!ix86_using_red_zone ())
30250 if (mode == DImode || TARGET_64BIT)
30254 /* Use LEA to deallocate stack space. In peephole2 it will be converted
30255 to a pop or add instruction if registers are available. */
30256 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
30257 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
30262 /* Implement TARGET_PREFERRED_RELOAD_CLASS.
30264 Put float CONST_DOUBLE in the constant pool instead of fp regs.
30265 QImode must go into class Q_REGS.
30266 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
30267 movdf to do mem-to-mem moves through integer regs. */
30270 ix86_preferred_reload_class (rtx x, reg_class_t regclass)
30272 enum machine_mode mode = GET_MODE (x);
30274 /* We're only allowed to return a subclass of CLASS. Many of the
30275 following checks fail for NO_REGS, so eliminate that early. */
30276 if (regclass == NO_REGS)
30279 /* All classes can load zeros. */
30280 if (x == CONST0_RTX (mode))
30283 /* Force constants into memory if we are loading a (nonzero) constant into
30284 an MMX or SSE register. This is because there are no MMX/SSE instructions
30285 to load from a constant. */
30287 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
30290 /* Prefer SSE regs only, if we can use them for math. */
30291 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
30292 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
30294 /* Floating-point constants need more complex checks. */
30295 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
30297 /* General regs can load everything. */
30298 if (reg_class_subset_p (regclass, GENERAL_REGS))
30301 /* Floats can load 0 and 1 plus some others. Note that we eliminated
30302 zero above. We only want to wind up preferring 80387 registers if
30303 we plan on doing computation with them. */
30305 && standard_80387_constant_p (x) > 0)
30307 /* Limit class to non-sse. */
30308 if (regclass == FLOAT_SSE_REGS)
30310 if (regclass == FP_TOP_SSE_REGS)
30312 if (regclass == FP_SECOND_SSE_REGS)
30313 return FP_SECOND_REG;
30314 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
30321 /* Generally when we see PLUS here, it's the function invariant
30322 (plus soft-fp const_int), which can only be computed into general
30323 regs. */
30324 if (GET_CODE (x) == PLUS)
30325 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
30327 /* QImode constants are easy to load, but non-constant QImode data
30328 must go into Q_REGS. */
30329 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
30331 if (reg_class_subset_p (regclass, Q_REGS))
30333 if (reg_class_subset_p (Q_REGS, regclass))
30341 /* Discourage putting floating-point values in SSE registers unless
30342 SSE math is being used, and likewise for the 387 registers. */
30344 ix86_preferred_output_reload_class (rtx x, reg_class_t regclass)
30346 enum machine_mode mode = GET_MODE (x);
30348 /* Restrict the output reload class to the register bank that we are doing
30349 math on. If we would like not to return a subset of CLASS, reject this
30350 alternative: if reload cannot do this, it will still use its choice. */
30352 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
30353 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
30355 if (X87_FLOAT_MODE_P (mode))
30357 if (regclass == FP_TOP_SSE_REGS)
30359 else if (regclass == FP_SECOND_SSE_REGS)
30360 return FP_SECOND_REG;
30362 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
30369 ix86_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
30370 enum machine_mode mode, secondary_reload_info *sri)
30372 /* Double-word spills from general registers to non-offsettable memory
30373 references (zero-extended addresses) require special handling. */
30376 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
30377 && rclass == GENERAL_REGS
30378 && !offsettable_memref_p (x))
30381 ? CODE_FOR_reload_noff_load
30382 : CODE_FOR_reload_noff_store);
30383 /* Add the cost of moving address to a temporary. */
30384 sri->extra_cost = 1;
30389 /* QImode spills from non-QI registers require
30390 an intermediate register on 32-bit targets. */
30392 && !in_p && mode == QImode
30393 && (rclass == GENERAL_REGS
30394 || rclass == LEGACY_REGS
30395 || rclass == INDEX_REGS))
30404 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
30405 regno = true_regnum (x);
30407 /* Return Q_REGS if the operand is in memory. */
30412 /* This condition handles the corner case where an expression involving
30413 pointers gets vectorized. We're trying to use the address of a
30414 stack slot as a vector initializer.
30416 (set (reg:V2DI 74 [ vect_cst_.2 ])
30417 (vec_duplicate:V2DI (reg/f:DI 20 frame)))
30419 Eventually frame gets turned into sp+offset like this:
30421 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
30422 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
30423 (const_int 392 [0x188]))))
30425 That later gets turned into:
30427 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
30428 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
30429 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))))
30431 We'll have the following reload recorded:
30433 Reload 0: reload_in (DI) =
30434 (plus:DI (reg/f:DI 7 sp)
30435 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))
30436 reload_out (V2DI) = (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
30437 SSE_REGS, RELOAD_OTHER (opnum = 0), can't combine
30438 reload_in_reg: (plus:DI (reg/f:DI 7 sp) (const_int 392 [0x188]))
30439 reload_out_reg: (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
30440 reload_reg_rtx: (reg:V2DI 22 xmm1)
30442 Which isn't going to work since SSE instructions can't handle scalar
30443 additions. Returning GENERAL_REGS forces the addition into an integer
30444 register, and reload can handle subsequent reloads without problems. */
30446 if (in_p && GET_CODE (x) == PLUS
30447 && SSE_CLASS_P (rclass)
30448 && SCALAR_INT_MODE_P (mode))
30449 return GENERAL_REGS;
30454 /* Implement TARGET_CLASS_LIKELY_SPILLED_P. */
30457 ix86_class_likely_spilled_p (reg_class_t rclass)
30468 case SSE_FIRST_REG:
30470 case FP_SECOND_REG:
30480 /* If we are copying between general and FP registers, we need a memory
30481 location. The same is true for SSE and MMX registers.
30483 To optimize register_move_cost performance, we provide an inline variant.
30485 The macro can't work reliably when one of the CLASSES is a class containing
30486 registers from multiple units (SSE, MMX, integer). We avoid this by never
30487 combining those units in single alternative in the machine description.
30488 Ensure that this constraint holds to avoid unexpected surprises.
30490 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
30491 enforce these sanity checks. */
30494 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
30495 enum machine_mode mode, int strict)
30497 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
30498 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
30499 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
30500 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
30501 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
30502 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
30504 gcc_assert (!strict);
30508 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
30511 /* ??? This is a lie. We do have moves between mmx/general, and for
30512 mmx/sse2. But by saying we need secondary memory we discourage the
30513 register allocator from using the mmx registers unless needed. */
30514 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
30517 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
30519 /* SSE1 doesn't have any direct moves from other classes. */
30523 /* If the target says that inter-unit moves are more expensive
30524 than moving through memory, then don't generate them. */
30525 if (!TARGET_INTER_UNIT_MOVES)
30528 /* Between SSE and general, we have moves no larger than word size. */
30529 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
30537 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
30538 enum machine_mode mode, int strict)
30540 return inline_secondary_memory_needed (class1, class2, mode, strict);
30543 /* Implement the TARGET_CLASS_MAX_NREGS hook.
30545 On the 80386, this is the size of MODE in words,
30546 except in the FP regs, where a single reg is always enough. */
30548 static unsigned char
30549 ix86_class_max_nregs (reg_class_t rclass, enum machine_mode mode)
30551 if (MAYBE_INTEGER_CLASS_P (rclass))
30553 if (mode == XFmode)
30554 return (TARGET_64BIT ? 2 : 3);
30555 else if (mode == XCmode)
30556 return (TARGET_64BIT ? 4 : 6);
30558 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
30562 if (COMPLEX_MODE_P (mode))
30569 /* Return true if the registers in CLASS cannot represent the change from
30570 mode FROM to mode TO. */
30573 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
30574 enum reg_class regclass)
30579 /* x87 registers can't do subreg at all, as all values are reformatted
30580 to extended precision. */
30581 if (MAYBE_FLOAT_CLASS_P (regclass))
30584 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
30586 /* Vector registers do not support QI or HImode loads. If we don't
30587 disallow a change to these modes, reload will assume it's ok to
30588 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
30589 the vec_dupv4hi pattern. */
30590 if (GET_MODE_SIZE (from) < 4)
30593 /* Vector registers do not support subreg with nonzero offsets, which
30594 are otherwise valid for integer registers. Since we can't see
30595 whether we have a nonzero offset from here, prohibit all
30596 nonparadoxical subregs changing size. */
30597 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
30604 /* Return the cost of moving data of mode M between a
30605 register and memory. A value of 2 is the default; this cost is
30606 relative to those in `REGISTER_MOVE_COST'.
30608 This function is used extensively by register_move_cost, which is used to
30609 build tables at startup. Make it inline in this case.
30610 When IN is 2, return the maximum of the in and out move costs.
30612 If moving between registers and memory is more expensive than
30613 between two registers, you should define this macro to express the
30614 relative cost.
30616 Also model the increased cost of moving QImode registers in non
30617 Q_REGS classes. */
30620 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
30624 if (FLOAT_CLASS_P (regclass))
30642 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
30643 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
30645 if (SSE_CLASS_P (regclass))
30648 switch (GET_MODE_SIZE (mode))
30663 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
30664 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
30666 if (MMX_CLASS_P (regclass))
30669 switch (GET_MODE_SIZE (mode))
30681 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
30682 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
30684 switch (GET_MODE_SIZE (mode))
30687 if (Q_CLASS_P (regclass) || TARGET_64BIT)
30690 return ix86_cost->int_store[0];
30691 if (TARGET_PARTIAL_REG_DEPENDENCY
30692 && optimize_function_for_speed_p (cfun))
30693 cost = ix86_cost->movzbl_load;
30695 cost = ix86_cost->int_load[0];
30697 return MAX (cost, ix86_cost->int_store[0]);
30703 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
30705 return ix86_cost->movzbl_load;
30707 return ix86_cost->int_store[0] + 4;
30712 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
30713 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
30715 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
30716 if (mode == TFmode)
30719 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
30721 cost = ix86_cost->int_load[2];
30723 cost = ix86_cost->int_store[2];
30724 return (cost * (((int) GET_MODE_SIZE (mode)
30725 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
30730 ix86_memory_move_cost (enum machine_mode mode, reg_class_t regclass,
30733 return inline_memory_move_cost (mode, (enum reg_class) regclass, in ? 1 : 0);
30737 /* Return the cost of moving data from a register in class CLASS1 to
30738 one in class CLASS2.
30740 It is not required that the cost always equal 2 when FROM is the same as TO;
30741 on some machines it is expensive to move between registers if they are not
30742 general registers. */
30745 ix86_register_move_cost (enum machine_mode mode, reg_class_t class1_i,
30746 reg_class_t class2_i)
30748 enum reg_class class1 = (enum reg_class) class1_i;
30749 enum reg_class class2 = (enum reg_class) class2_i;
30751 /* In case we require secondary memory, compute cost of the store followed
30752 by load. In order to avoid bad register allocation choices, we need
30753 this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
30755 if (inline_secondary_memory_needed (class1, class2, mode, 0))
30759 cost += inline_memory_move_cost (mode, class1, 2);
30760 cost += inline_memory_move_cost (mode, class2, 2);
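/* For instance, a DImode copy between GENERAL_REGS and SSE_REGS on
   ia32 has no direct instruction, so it is priced as the sum of the
   two symmetric memory-move costs computed above; passing IN == 2
   selects the maximum of the load and store cost for each class.  */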
30762 /* In case of copying from a general purpose register, we may emit multiple
30763 stores followed by a single load, causing a memory size mismatch stall.
30764 Count this as an arbitrarily high cost of 20. */
30765 if (targetm.class_max_nregs (class1, mode)
30766 > targetm.class_max_nregs (class2, mode))
30769 /* In the case of FP/MMX moves, the registers actually overlap, and we
30770 have to switch modes in order to treat them differently. */
30771 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
30772 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
30778 /* Moves between SSE/MMX and integer unit are expensive. */
30779 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
30780 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
30782 /* ??? By keeping returned value relatively high, we limit the number
30783 of moves between integer and MMX/SSE registers for all targets.
30784 Additionally, the high value prevents a problem with x86_modes_tieable_p(),
30785 where integer modes in MMX/SSE registers are not tieable
30786 because of missing QImode and HImode moves to, from or between
30787 MMX/SSE registers. */
30788 return MAX (8, ix86_cost->mmxsse_to_integer);
30790 if (MAYBE_FLOAT_CLASS_P (class1))
30791 return ix86_cost->fp_move;
30792 if (MAYBE_SSE_CLASS_P (class1))
30793 return ix86_cost->sse_move;
30794 if (MAYBE_MMX_CLASS_P (class1))
30795 return ix86_cost->mmx_move;
30799 /* Return TRUE if hard register REGNO can hold a value of machine mode MODE. */
30803 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
30805 /* The flags register, and only the flags register, can hold CCmode values. */
30806 if (CC_REGNO_P (regno))
30807 return GET_MODE_CLASS (mode) == MODE_CC;
30808 if (GET_MODE_CLASS (mode) == MODE_CC
30809 || GET_MODE_CLASS (mode) == MODE_RANDOM
30810 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
30812 if (FP_REGNO_P (regno))
30813 return VALID_FP_MODE_P (mode);
30814 if (SSE_REGNO_P (regno))
30816 /* We implement the move patterns for all vector modes into and
30817 out of SSE registers, even when no operation instructions
30818 are available. OImode move is available only when AVX is enabled. */
30820 return ((TARGET_AVX && mode == OImode)
30821 || VALID_AVX256_REG_MODE (mode)
30822 || VALID_SSE_REG_MODE (mode)
30823 || VALID_SSE2_REG_MODE (mode)
30824 || VALID_MMX_REG_MODE (mode)
30825 || VALID_MMX_REG_MODE_3DNOW (mode));
30827 if (MMX_REGNO_P (regno))
30829 /* We implement the move patterns for 3DNOW modes even in MMX mode,
30830 so if the register is available at all, then we can move data of
30831 the given mode into or out of it. */
30832 return (VALID_MMX_REG_MODE (mode)
30833 || VALID_MMX_REG_MODE_3DNOW (mode));
30836 if (mode == QImode)
30838 /* Take care for QImode values - they can be in non-QI regs,
30839 but then they do cause partial register stalls. */
30840 if (regno <= BX_REG || TARGET_64BIT)
30842 if (!TARGET_PARTIAL_REG_STALL)
30844 return !can_create_pseudo_p ();
30846 /* We handle both integers and floats in the general purpose registers. */
30847 else if (VALID_INT_MODE_P (mode))
30849 else if (VALID_FP_MODE_P (mode))
30851 else if (VALID_DFP_MODE_P (mode))
30853 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
30854 on to use that value in smaller contexts, this can easily force a
30855 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
30856 supporting DImode, allow it. */
30857 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
30863 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
30864 tieable integer mode. */
30867 ix86_tieable_integer_mode_p (enum machine_mode mode)
30876 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
30879 return TARGET_64BIT;
30886 /* Return true if MODE1 is accessible in a register that can hold MODE2
30887 without copying. That is, all register classes that can hold MODE2
30888 can also hold MODE1. */
30891 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
30893 if (mode1 == mode2)
30896 if (ix86_tieable_integer_mode_p (mode1)
30897 && ix86_tieable_integer_mode_p (mode2))
30900 /* MODE2 being XFmode implies fp stack or general regs, which means we
30901 can tie any smaller floating point modes to it. Note that we do not
30902 tie this with TFmode. */
30903 if (mode2 == XFmode)
30904 return mode1 == SFmode || mode1 == DFmode;
30906 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
30907 that we can tie it with SFmode. */
30908 if (mode2 == DFmode)
30909 return mode1 == SFmode;
30911 /* If MODE2 is only appropriate for an SSE register, then tie with
30912 any other mode acceptable to SSE registers. */
30913 if (GET_MODE_SIZE (mode2) == 16
30914 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
30915 return (GET_MODE_SIZE (mode1) == 16
30916 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
30918 /* If MODE2 is appropriate for an MMX register, then tie
30919 with any other mode acceptable to MMX registers. */
30920 if (GET_MODE_SIZE (mode2) == 8
30921 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
30922 return (GET_MODE_SIZE (mode1) == 8
30923 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
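/* Concrete consequences of the rules above: SFmode ties with both
   DFmode and XFmode, since every register class that can hold the
   wider FP mode can also hold SFmode; V4SFmode ties with V2DImode,
   both being 16-byte SSE-only modes; but SImode does not tie with
   V4SImode, because the vector mode is restricted to SSE registers.  */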
30928 /* Compute a (partial) cost for rtx X. Return true if the complete
30929 cost has been computed, and false if subexpressions should be
30930 scanned. In either case, *TOTAL contains the cost result. */
30933 ix86_rtx_costs (rtx x, int code, int outer_code_i, int opno, int *total,
30936 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
30937 enum machine_mode mode = GET_MODE (x);
30938 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
30946 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
30948 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
30950 else if (flag_pic && SYMBOLIC_CONST (x)
30952 || (GET_CODE (x) != LABEL_REF
30953 && (GET_CODE (x) != SYMBOL_REF
30954 || !SYMBOL_REF_LOCAL_P (x)))))
30961 if (mode == VOIDmode)
30964 switch (standard_80387_constant_p (x))
30969 default: /* Other constants */
30974 /* Start with (MEM (SYMBOL_REF)), since that's where
30975 it'll probably end up. Add a penalty for size. */
30976 *total = (COSTS_N_INSNS (1)
30977 + (flag_pic != 0 && !TARGET_64BIT)
30978 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
30984 /* The zero extension is often completely free on x86_64, so make
30985 it as cheap as possible. */
30986 if (TARGET_64BIT && mode == DImode
30987 && GET_MODE (XEXP (x, 0)) == SImode)
30989 else if (TARGET_ZERO_EXTEND_WITH_AND)
30990 *total = cost->add;
30992 *total = cost->movzx;
30996 *total = cost->movsx;
31000 if (CONST_INT_P (XEXP (x, 1))
31001 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
31003 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
31006 *total = cost->add;
31009 if ((value == 2 || value == 3)
31010 && cost->lea <= cost->shift_const)
31012 *total = cost->lea;
31022 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
31024 if (CONST_INT_P (XEXP (x, 1)))
31026 if (INTVAL (XEXP (x, 1)) > 32)
31027 *total = cost->shift_const + COSTS_N_INSNS (2);
31029 *total = cost->shift_const * 2;
31033 if (GET_CODE (XEXP (x, 1)) == AND)
31034 *total = cost->shift_var * 2;
31036 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
31041 if (CONST_INT_P (XEXP (x, 1)))
31042 *total = cost->shift_const;
31044 *total = cost->shift_var;
31052 gcc_assert (FLOAT_MODE_P (mode));
31053 gcc_assert (TARGET_FMA || TARGET_FMA4);
31055 /* ??? SSE scalar/vector cost should be used here. */
31056 /* ??? Bald assumption that fma has the same cost as fmul. */
31057 *total = cost->fmul;
31058 *total += rtx_cost (XEXP (x, 1), FMA, 1, speed);
31060 /* Negate in op0 or op2 is free: FMS, FNMA, FNMS. */
31062 if (GET_CODE (sub) == NEG)
31063 sub = XEXP (sub, 0);
31064 *total += rtx_cost (sub, FMA, 0, speed);
31067 if (GET_CODE (sub) == NEG)
31068 sub = XEXP (sub, 0);
31069 *total += rtx_cost (sub, FMA, 2, speed);
31074 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
31076 /* ??? SSE scalar cost should be used here. */
31077 *total = cost->fmul;
31080 else if (X87_FLOAT_MODE_P (mode))
31082 *total = cost->fmul;
31085 else if (FLOAT_MODE_P (mode))
31087 /* ??? SSE vector cost should be used here. */
31088 *total = cost->fmul;
31093 rtx op0 = XEXP (x, 0);
31094 rtx op1 = XEXP (x, 1);
31096 if (CONST_INT_P (XEXP (x, 1)))
31098 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
31099 for (nbits = 0; value != 0; value &= value - 1)
31103 /* This is arbitrary. */
31106 /* Compute costs correctly for widening multiplication. */
31107 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
31108 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
31109 == GET_MODE_SIZE (mode))
31111 int is_mulwiden = 0;
31112 enum machine_mode inner_mode = GET_MODE (op0);
31114 if (GET_CODE (op0) == GET_CODE (op1))
31115 is_mulwiden = 1, op1 = XEXP (op1, 0);
31116 else if (CONST_INT_P (op1))
31118 if (GET_CODE (op0) == SIGN_EXTEND)
31119 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
31122 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
31126 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
31129 *total = (cost->mult_init[MODE_INDEX (mode)]
31130 + nbits * cost->mult_bit
31131 + rtx_cost (op0, outer_code, opno, speed)
31132 + rtx_cost (op1, outer_code, opno, speed));
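/* Worked example: for (mult:SI (reg:SI x) (const_int 9)) the loop
   above clears one set bit per iteration (9 -> 8 -> 0), giving
   nbits == 2, so the estimate is mult_init[MODE_INDEX (SImode)]
   + 2 * mult_bit plus the operand costs; a non-constant multiplier
   instead uses the arbitrary fixed bit count chosen above.  */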
31141 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
31142 /* ??? SSE cost should be used here. */
31143 *total = cost->fdiv;
31144 else if (X87_FLOAT_MODE_P (mode))
31145 *total = cost->fdiv;
31146 else if (FLOAT_MODE_P (mode))
31147 /* ??? SSE vector cost should be used here. */
31148 *total = cost->fdiv;
31150 *total = cost->divide[MODE_INDEX (mode)];
31154 if (GET_MODE_CLASS (mode) == MODE_INT
31155 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
31157 if (GET_CODE (XEXP (x, 0)) == PLUS
31158 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
31159 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
31160 && CONSTANT_P (XEXP (x, 1)))
31162 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
31163 if (val == 2 || val == 4 || val == 8)
31165 *total = cost->lea;
31166 *total += rtx_cost (XEXP (XEXP (x, 0), 1),
31167 outer_code, opno, speed);
31168 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
31169 outer_code, opno, speed);
31170 *total += rtx_cost (XEXP (x, 1), outer_code, opno, speed);
31174 else if (GET_CODE (XEXP (x, 0)) == MULT
31175 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
31177 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
31178 if (val == 2 || val == 4 || val == 8)
31180 *total = cost->lea;
31181 *total += rtx_cost (XEXP (XEXP (x, 0), 0),
31182 outer_code, opno, speed);
31183 *total += rtx_cost (XEXP (x, 1), outer_code, opno, speed);
31187 else if (GET_CODE (XEXP (x, 0)) == PLUS)
31189 *total = cost->lea;
31190 *total += rtx_cost (XEXP (XEXP (x, 0), 0),
31191 outer_code, opno, speed);
31192 *total += rtx_cost (XEXP (XEXP (x, 0), 1),
31193 outer_code, opno, speed);
31194 *total += rtx_cost (XEXP (x, 1), outer_code, opno, speed);
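/* These three cases model address arithmetic that fits a single lea;
   e.g. (plus (plus (mult (reg) (const_int 4)) (reg)) (const_int 12))
   matches the first pattern and is priced as one lea plus its operand
   costs, rather than as a shift followed by two adds.  */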
31201 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
31203 /* ??? SSE cost should be used here. */
31204 *total = cost->fadd;
31207 else if (X87_FLOAT_MODE_P (mode))
31209 *total = cost->fadd;
31212 else if (FLOAT_MODE_P (mode))
31214 /* ??? SSE vector cost should be used here. */
31215 *total = cost->fadd;
31223 if (!TARGET_64BIT && mode == DImode)
31225 *total = (cost->add * 2
31226 + (rtx_cost (XEXP (x, 0), outer_code, opno, speed)
31227 << (GET_MODE (XEXP (x, 0)) != DImode))
31228 + (rtx_cost (XEXP (x, 1), outer_code, opno, speed)
31229 << (GET_MODE (XEXP (x, 1)) != DImode)));
31235 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
31237 /* ??? SSE cost should be used here. */
31238 *total = cost->fchs;
31241 else if (X87_FLOAT_MODE_P (mode))
31243 *total = cost->fchs;
31246 else if (FLOAT_MODE_P (mode))
31248 /* ??? SSE vector cost should be used here. */
31249 *total = cost->fchs;
31255 if (!TARGET_64BIT && mode == DImode)
31256 *total = cost->add * 2;
31258 *total = cost->add;
31262 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
31263 && XEXP (XEXP (x, 0), 1) == const1_rtx
31264 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
31265 && XEXP (x, 1) == const0_rtx)
31267 /* This kind of construct is implemented using test[bwl].
31268 Treat it as if we had an AND. */
31269 *total = (cost->add
31270 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, opno, speed)
31271 + rtx_cost (const1_rtx, outer_code, opno, speed));
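/* E.g. (compare (zero_extract (reg) (const_int 1) (const_int 5))
   (const_int 0)) tests bit 5 only and can be emitted as
   testl $0x20, %eax - hence the AND-like cost used above.  */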
31277 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
31282 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
31283 /* ??? SSE cost should be used here. */
31284 *total = cost->fabs;
31285 else if (X87_FLOAT_MODE_P (mode))
31286 *total = cost->fabs;
31287 else if (FLOAT_MODE_P (mode))
31288 /* ??? SSE vector cost should be used here. */
31289 *total = cost->fabs;
31293 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
31294 /* ??? SSE cost should be used here. */
31295 *total = cost->fsqrt;
31296 else if (X87_FLOAT_MODE_P (mode))
31297 *total = cost->fsqrt;
31298 else if (FLOAT_MODE_P (mode))
31299 /* ??? SSE vector cost should be used here. */
31300 *total = cost->fsqrt;
31304 if (XINT (x, 1) == UNSPEC_TP)
31311 case VEC_DUPLICATE:
31312 /* ??? Assume all of these vector manipulation patterns are
31313 recognizable, in which case they all pretty much have the same cost. */
31315 *total = COSTS_N_INSNS (1);
31325 static int current_machopic_label_num;
31327 /* Given a symbol name and its associated stub, write out the
31328 definition of the stub. */
31331 machopic_output_stub (FILE *file, const char *symb, const char *stub)
31333 unsigned int length;
31334 char *binder_name, *symbol_name, lazy_ptr_name[32];
31335 int label = ++current_machopic_label_num;
31337 /* For 64-bit we shouldn't get here. */
31338 gcc_assert (!TARGET_64BIT);
31340 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
31341 symb = targetm.strip_name_encoding (symb);
31343 length = strlen (stub);
31344 binder_name = XALLOCAVEC (char, length + 32);
31345 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
31347 length = strlen (symb);
31348 symbol_name = XALLOCAVEC (char, length + 32);
31349 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
31351 sprintf (lazy_ptr_name, "L%d$lz", label);
31353 if (MACHOPIC_ATT_STUB)
31354 switch_to_section (darwin_sections[machopic_picsymbol_stub3_section]);
31355 else if (MACHOPIC_PURE)
31356 switch_to_section (darwin_sections[machopic_picsymbol_stub2_section]);
31358 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
31360 fprintf (file, "%s:\n", stub);
31361 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
31363 if (MACHOPIC_ATT_STUB)
31365 fprintf (file, "\thlt ; hlt ; hlt ; hlt ; hlt\n");
31367 else if (MACHOPIC_PURE)
31370 /* 25-byte PIC stub using "CALL get_pc_thunk". */
31371 rtx tmp = gen_rtx_REG (SImode, 2 /* ECX */);
31372 output_set_got (tmp, NULL_RTX); /* "CALL ___<cpu>.get_pc_thunk.cx". */
31373 fprintf (file, "LPC$%d:\tmovl\t%s-LPC$%d(%%ecx),%%ecx\n",
31374 label, lazy_ptr_name, label);
31375 fprintf (file, "\tjmp\t*%%ecx\n");
31378 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
31380 /* The AT&T-style ("self-modifying") stub is not lazily bound, thus
31381 it needs no stub-binding-helper. */
31382 if (MACHOPIC_ATT_STUB)
31385 fprintf (file, "%s:\n", binder_name);
31389 fprintf (file, "\tlea\t%s-%s(%%ecx),%%ecx\n", lazy_ptr_name, binder_name);
31390 fprintf (file, "\tpushl\t%%ecx\n");
31393 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
31395 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
31397 /* N.B. Keep the correspondence of these
31398 'symbol_ptr/symbol_ptr2/symbol_ptr3' sections consistent with the
31399 old-pic/new-pic/non-pic stubs; altering this will break
31400 compatibility with existing dylibs. */
31403 /* 25-byte PIC stub using "CALL get_pc_thunk". */
31404 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr2_section]);
31407 /* 16-byte -mdynamic-no-pic stub. */
31408 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr3_section]);
31410 fprintf (file, "%s:\n", lazy_ptr_name);
31411 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
31412 fprintf (file, ASM_LONG "%s\n", binder_name);
31414 #endif /* TARGET_MACHO */
31416 /* Order the registers for the register allocator. */
31419 x86_order_regs_for_local_alloc (void)
31424 /* First allocate the local general purpose registers. */
31425 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
31426 if (GENERAL_REGNO_P (i) && call_used_regs[i])
31427 reg_alloc_order [pos++] = i;
31429 /* Global general purpose registers. */
31430 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
31431 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
31432 reg_alloc_order [pos++] = i;
31434 /* x87 registers come first in case we are doing FP math using them. */
31436 if (!TARGET_SSE_MATH)
31437 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
31438 reg_alloc_order [pos++] = i;
31440 /* SSE registers. */
31441 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
31442 reg_alloc_order [pos++] = i;
31443 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
31444 reg_alloc_order [pos++] = i;
31446 /* x87 registers. */
31447 if (TARGET_SSE_MATH)
31448 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
31449 reg_alloc_order [pos++] = i;
31451 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
31452 reg_alloc_order [pos++] = i;
31454 /* Initialize the rest of the array, as we do not allocate some registers at all. */
31456 while (pos < FIRST_PSEUDO_REGISTER)
31457 reg_alloc_order [pos++] = 0;
31460 /* Handle a "callee_pop_aggregate_return" attribute; arguments as
31461 in struct attribute_spec.handler. */
31463 ix86_handle_callee_pop_aggregate_return (tree *node, tree name,
31465 int flags ATTRIBUTE_UNUSED,
31466 bool *no_add_attrs)
31468 if (TREE_CODE (*node) != FUNCTION_TYPE
31469 && TREE_CODE (*node) != METHOD_TYPE
31470 && TREE_CODE (*node) != FIELD_DECL
31471 && TREE_CODE (*node) != TYPE_DECL)
31473 warning (OPT_Wattributes, "%qE attribute only applies to functions",
31475 *no_add_attrs = true;
31480 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
31482 *no_add_attrs = true;
31485 if (is_attribute_p ("callee_pop_aggregate_return", name))
31489 cst = TREE_VALUE (args);
31490 if (TREE_CODE (cst) != INTEGER_CST)
31492 warning (OPT_Wattributes,
31493 "%qE attribute requires an integer constant argument",
31495 *no_add_attrs = true;
31497 else if (compare_tree_int (cst, 0) != 0
31498 && compare_tree_int (cst, 1) != 0)
31500 warning (OPT_Wattributes,
31501 "argument to %qE attribute is neither zero, nor one",
31503 *no_add_attrs = true;
31512 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
31513 struct attribute_spec.handler. */
31515 ix86_handle_abi_attribute (tree *node, tree name,
31516 tree args ATTRIBUTE_UNUSED,
31517 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
31519 if (TREE_CODE (*node) != FUNCTION_TYPE
31520 && TREE_CODE (*node) != METHOD_TYPE
31521 && TREE_CODE (*node) != FIELD_DECL
31522 && TREE_CODE (*node) != TYPE_DECL)
31524 warning (OPT_Wattributes, "%qE attribute only applies to functions",
31526 *no_add_attrs = true;
31530 /* Can combine regparm with all attributes but fastcall. */
31531 if (is_attribute_p ("ms_abi", name))
31533 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
31535 error ("ms_abi and sysv_abi attributes are not compatible");
31540 else if (is_attribute_p ("sysv_abi", name))
31542 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
31544 error ("ms_abi and sysv_abi attributes are not compatible");
31553 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
31554 struct attribute_spec.handler. */
31556 ix86_handle_struct_attribute (tree *node, tree name,
31557 tree args ATTRIBUTE_UNUSED,
31558 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
31561 if (DECL_P (*node))
31563 if (TREE_CODE (*node) == TYPE_DECL)
31564 type = &TREE_TYPE (*node);
31569 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
31570 || TREE_CODE (*type) == UNION_TYPE)))
31572 warning (OPT_Wattributes, "%qE attribute ignored",
31574 *no_add_attrs = true;
31577 else if ((is_attribute_p ("ms_struct", name)
31578 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
31579 || ((is_attribute_p ("gcc_struct", name)
31580 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
31582 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
31584 *no_add_attrs = true;
31591 ix86_handle_fndecl_attribute (tree *node, tree name,
31592 tree args ATTRIBUTE_UNUSED,
31593 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
31595 if (TREE_CODE (*node) != FUNCTION_DECL)
31597 warning (OPT_Wattributes, "%qE attribute only applies to functions",
31599 *no_add_attrs = true;
31605 ix86_ms_bitfield_layout_p (const_tree record_type)
31607 return ((TARGET_MS_BITFIELD_LAYOUT
31608 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
31609 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
31612 /* Returns an expression indicating where the this parameter is
31613 located on entry to the FUNCTION. */
31616 x86_this_parameter (tree function)
31618 tree type = TREE_TYPE (function);
31619 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
31624 const int *parm_regs;
31626 if (ix86_function_type_abi (type) == MS_ABI)
31627 parm_regs = x86_64_ms_abi_int_parameter_registers;
31629 parm_regs = x86_64_int_parameter_registers;
31630 return gen_rtx_REG (DImode, parm_regs[aggr]);
31633 nregs = ix86_function_regparm (type, function);
31635 if (nregs > 0 && !stdarg_p (type))
31638 unsigned int ccvt = ix86_get_callcvt (type);
31640 if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
31641 regno = aggr ? DX_REG : CX_REG;
31642 else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
31646 return gen_rtx_MEM (SImode,
31647 plus_constant (stack_pointer_rtx, 4));
31656 return gen_rtx_MEM (SImode,
31657 plus_constant (stack_pointer_rtx, 4));
31660 return gen_rtx_REG (SImode, regno);
31663 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
31666 /* Determine whether x86_output_mi_thunk can succeed. */
31669 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
31670 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
31671 HOST_WIDE_INT vcall_offset, const_tree function)
31673 /* 64-bit can handle anything. */
31677 /* For 32-bit, everything's fine if we have one free register. */
31678 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
31681 /* Need a free register for vcall_offset. */
31685 /* Need a free register for GOT references. */
31686 if (flag_pic && !targetm.binds_local_p (function))
31689 /* Otherwise ok. */
31693 /* Output the assembler code for a thunk function. THUNK_DECL is the
31694 declaration for the thunk function itself, FUNCTION is the decl for
31695 the target function. DELTA is an immediate constant offset to be
31696 added to THIS. If VCALL_OFFSET is nonzero, the word at
31697 *(*this + vcall_offset) should be added to THIS. */
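/* As a rough sketch of the output in the simplest 32-bit case
   (DELTA == 4, VCALL_OFFSET == 0, non-PIC), where THIS arrives at
   4(%esp), the thunk amounts to

	addl	$4, 4(%esp)
	jmp	function

   while a nonzero VCALL_OFFSET additionally loads *THIS into a
   scratch register and adds *(*THIS + VCALL_OFFSET) to THIS.  */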
31700 x86_output_mi_thunk (FILE *file,
31701 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
31702 HOST_WIDE_INT vcall_offset, tree function)
31704 rtx this_param = x86_this_parameter (function);
31705 rtx this_reg, tmp, fnaddr;
31707 emit_note (NOTE_INSN_PROLOGUE_END);
31709 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
31710 pull it in now and let DELTA benefit. */
31711 if (REG_P (this_param))
31712 this_reg = this_param;
31713 else if (vcall_offset)
31715 /* Put the this parameter into %eax. */
31716 this_reg = gen_rtx_REG (Pmode, AX_REG);
31717 emit_move_insn (this_reg, this_param);
31720 this_reg = NULL_RTX;
31722 /* Adjust the this parameter by a fixed constant. */
31725 rtx delta_rtx = GEN_INT (delta);
31726 rtx delta_dst = this_reg ? this_reg : this_param;
31730 if (!x86_64_general_operand (delta_rtx, Pmode))
31732 tmp = gen_rtx_REG (Pmode, R10_REG);
31733 emit_move_insn (tmp, delta_rtx);
31738 ix86_emit_binop (PLUS, Pmode, delta_dst, delta_rtx);
31741 /* Adjust the this parameter by a value stored in the vtable. */
31744 rtx vcall_addr, vcall_mem, this_mem;
31745 unsigned int tmp_regno;
31748 tmp_regno = R10_REG;
31751 unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (function));
31752 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) != 0)
31753 tmp_regno = AX_REG;
31755 tmp_regno = CX_REG;
31757 tmp = gen_rtx_REG (Pmode, tmp_regno);
31759 this_mem = gen_rtx_MEM (ptr_mode, this_reg);
31760 if (Pmode != ptr_mode)
31761 this_mem = gen_rtx_ZERO_EXTEND (Pmode, this_mem);
31762 emit_move_insn (tmp, this_mem);
31764 /* Adjust the this parameter. */
31765 vcall_addr = plus_constant (tmp, vcall_offset);
31767 && !ix86_legitimate_address_p (ptr_mode, vcall_addr, true))
31769 rtx tmp2 = gen_rtx_REG (Pmode, R11_REG);
31770 emit_move_insn (tmp2, GEN_INT (vcall_offset));
31771 vcall_addr = gen_rtx_PLUS (Pmode, tmp, tmp2);
31774 vcall_mem = gen_rtx_MEM (ptr_mode, vcall_addr);
31775 if (Pmode != ptr_mode)
31776 emit_insn (gen_addsi_1_zext (this_reg,
31777 gen_rtx_REG (ptr_mode,
31781 ix86_emit_binop (PLUS, Pmode, this_reg, vcall_mem);
31784 /* If necessary, drop THIS back to its stack slot. */
31785 if (this_reg && this_reg != this_param)
31786 emit_move_insn (this_param, this_reg);
31788 fnaddr = XEXP (DECL_RTL (function), 0);
31791 if (!flag_pic || targetm.binds_local_p (function)
31792 || cfun->machine->call_abi == MS_ABI)
31796 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, fnaddr), UNSPEC_GOTPCREL);
31797 tmp = gen_rtx_CONST (Pmode, tmp);
31798 fnaddr = gen_rtx_MEM (Pmode, tmp);
31803 if (!flag_pic || targetm.binds_local_p (function))
31806 else if (TARGET_MACHO)
31808 fnaddr = machopic_indirect_call_target (DECL_RTL (function));
31809 fnaddr = XEXP (fnaddr, 0);
31811 #endif /* TARGET_MACHO */
31814 tmp = gen_rtx_REG (Pmode, CX_REG);
31815 output_set_got (tmp, NULL_RTX);
31817 fnaddr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, fnaddr), UNSPEC_GOT);
31818 fnaddr = gen_rtx_PLUS (Pmode, fnaddr, tmp);
31819 fnaddr = gen_rtx_MEM (Pmode, fnaddr);
31823 /* Our sibling call patterns do not allow memories, because we have no
31824 predicate that can distinguish between frame and non-frame memory.
31825 For our purposes here, we can get away with (ab)using a jump pattern,
31826 because we're going to do no optimization. */
31827 if (MEM_P (fnaddr))
31828 emit_jump_insn (gen_indirect_jump (fnaddr));
31831 tmp = gen_rtx_MEM (QImode, fnaddr);
31832 tmp = gen_rtx_CALL (VOIDmode, tmp, const0_rtx);
31833 tmp = emit_call_insn (tmp);
31834 SIBLING_CALL_P (tmp) = 1;
31838 /* Emit just enough of rest_of_compilation to get the insns emitted.
31839 Note that use_thunk calls assemble_start_function et al. */
31840 tmp = get_insns ();
31841 insn_locators_alloc ();
31842 shorten_branches (tmp);
31843 final_start_function (tmp, file, 1);
31844 final (tmp, file, 1);
31845 final_end_function ();
31849 x86_file_start (void)
31851 default_file_start ();
31853 darwin_file_start ();
31855 if (X86_FILE_START_VERSION_DIRECTIVE)
31856 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
31857 if (X86_FILE_START_FLTUSED)
31858 fputs ("\t.global\t__fltused\n", asm_out_file);
31859 if (ix86_asm_dialect == ASM_INTEL)
31860 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
31864 x86_field_alignment (tree field, int computed)
31866 enum machine_mode mode;
31867 tree type = TREE_TYPE (field);
31869 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
31871 mode = TYPE_MODE (strip_array_types (type));
31872 if (mode == DFmode || mode == DCmode
31873 || GET_MODE_CLASS (mode) == MODE_INT
31874 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
31875 return MIN (32, computed);
31879 /* Output assembler code to FILE to increment profiler label # LABELNO
31880 for profiling a function entry. */
31882 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
31884 const char *mcount_name = (flag_fentry ? MCOUNT_NAME_BEFORE_PROLOGUE
31889 #ifndef NO_PROFILE_COUNTERS
31890 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
31893 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
31894 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", mcount_name);
31896 fprintf (file, "\tcall\t%s\n", mcount_name);
31900 #ifndef NO_PROFILE_COUNTERS
31901 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
31904 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", mcount_name);
31908 #ifndef NO_PROFILE_COUNTERS
31909 fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
31912 fprintf (file, "\tcall\t%s\n", mcount_name);
31916 /* We don't have exact information about the insn sizes, but we may assume
31917 quite safely that we are informed about all 1 byte insns and memory
31918 address sizes. This is enough to eliminate unnecessary padding in 99% of cases. */
31922 min_insn_size (rtx insn)
31926 if (!INSN_P (insn) || !active_insn_p (insn))
31929 /* Discard alignments we've emitted and jump instructions. */
31930 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
31931 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
31933 if (JUMP_TABLE_DATA_P (insn))
31936 /* Important case - calls are always 5 bytes.
31937 It is common to have many calls in a row. */
31939 && symbolic_reference_mentioned_p (PATTERN (insn))
31940 && !SIBLING_CALL_P (insn))
31942 len = get_attr_length (insn);
31946 /* For normal instructions we rely on get_attr_length being exact,
31947 with a few exceptions. */
31948 if (!JUMP_P (insn))
31950 enum attr_type type = get_attr_type (insn);
31955 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
31956 || asm_noperands (PATTERN (insn)) >= 0)
31963 /* Otherwise trust get_attr_length. */
31967 l = get_attr_length_address (insn);
31968 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
31977 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
31979 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16-byte window. */
31983 ix86_avoid_jump_mispredicts (void)
31985 rtx insn, start = get_insns ();
31986 int nbytes = 0, njumps = 0;
31989 /* Look for all minimal intervals of instructions containing 4 jumps.
31990 The intervals are bounded by START and INSN. NBYTES is the total
31991 size of instructions in the interval including INSN and not including
31992 START. When NBYTES is smaller than 16, it is possible
31993 that the end of START and INSN ends up in the same 16-byte page.
31995 The smallest offset in the page at which INSN can start is the case where START
31996 ends at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
31997 We therefore add a p2align to the 16-byte window with max-skip 15 - NBYTES + sizeof (INSN). */
31999 for (insn = start; insn; insn = NEXT_INSN (insn))
32003 if (LABEL_P (insn))
32005 int align = label_to_alignment (insn);
32006 int max_skip = label_to_max_skip (insn);
32010 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
32011 already in the current 16 byte page, because otherwise
32012 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
32013 bytes to reach 16 byte boundary. */
32015 || (align <= 3 && max_skip != (1 << align) - 1))
32018 fprintf (dump_file, "Label %i with max_skip %i\n",
32019 INSN_UID (insn), max_skip);
32022 while (nbytes + max_skip >= 16)
32024 start = NEXT_INSN (start);
32025 if ((JUMP_P (start)
32026 && GET_CODE (PATTERN (start)) != ADDR_VEC
32027 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
32029 njumps--, isjump = 1;
32032 nbytes -= min_insn_size (start);
32038 min_size = min_insn_size (insn);
32039 nbytes += min_size;
32041 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
32042 INSN_UID (insn), min_size);
32044 && GET_CODE (PATTERN (insn)) != ADDR_VEC
32045 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
32053 start = NEXT_INSN (start);
32054 if ((JUMP_P (start)
32055 && GET_CODE (PATTERN (start)) != ADDR_VEC
32056 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
32058 njumps--, isjump = 1;
32061 nbytes -= min_insn_size (start);
32063 gcc_assert (njumps >= 0);
32065 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
32066 INSN_UID (start), INSN_UID (insn), nbytes);
32068 if (njumps == 3 && isjump && nbytes < 16)
32070 int padsize = 15 - nbytes + min_insn_size (insn);
32073 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
32074 INSN_UID (insn), padsize);
32075 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
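/* Example: if the window currently holds nbytes == 12 and the fourth
   jump's minimum size is 2, then padsize == 15 - 12 + 2 == 5, and a
   5-byte pad guarantees the four jumps can no longer share a single
   16-byte window.  */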
32081 /* AMD Athlon works faster
32082 when RET is not the destination of a conditional jump and is not directly
32083 preceded by another jump instruction. We avoid the penalty by inserting a NOP
32084 just before the RET instruction in such cases. */
32086 ix86_pad_returns (void)
32091 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
32093 basic_block bb = e->src;
32094 rtx ret = BB_END (bb);
32096 bool replace = false;
32098 if (!JUMP_P (ret) || !ANY_RETURN_P (PATTERN (ret))
32099 || optimize_bb_for_size_p (bb))
32101 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
32102 if (active_insn_p (prev) || LABEL_P (prev))
32104 if (prev && LABEL_P (prev))
32109 FOR_EACH_EDGE (e, ei, bb->preds)
32110 if (EDGE_FREQUENCY (e) && e->src->index >= 0
32111 && !(e->flags & EDGE_FALLTHRU))
32116 prev = prev_active_insn (ret);
32118 && ((JUMP_P (prev) && any_condjump_p (prev))
32121 /* Empty functions get a branch mispredict even when
32122 the jump destination is not visible to us. */
32123 if (!prev && !optimize_function_for_size_p (cfun))
32128 emit_jump_insn_before (gen_simple_return_internal_long (), ret);
32134 /* Count the minimum number of instructions in BB. Return 4 if the
32135 number of instructions >= 4. */
32138 ix86_count_insn_bb (basic_block bb)
32141 int insn_count = 0;
32143 /* Count number of instructions in this block. Return 4 if the number
32144 of instructions >= 4. */
32145 FOR_BB_INSNS (bb, insn)
32147 /* This only happens in exit blocks. */
32149 && ANY_RETURN_P (PATTERN (insn)))
32152 if (NONDEBUG_INSN_P (insn)
32153 && GET_CODE (PATTERN (insn)) != USE
32154 && GET_CODE (PATTERN (insn)) != CLOBBER)
32157 if (insn_count >= 4)
32166 /* Count the minimum number of instructions in code path in BB.
32167 Return 4 if the number of instructions >= 4. */
32170 ix86_count_insn (basic_block bb)
32174 int min_prev_count;
32176 /* Only bother counting instructions along paths with no
32177 more than 2 basic blocks between entry and exit. Given
32178 that BB has an edge to exit, determine if a predecessor
32179 of BB has an edge from entry. If so, compute the number
32180 of instructions in the predecessor block. If there
32181 happen to be multiple such blocks, compute the minimum. */
32182 min_prev_count = 4;
32183 FOR_EACH_EDGE (e, ei, bb->preds)
32186 edge_iterator prev_ei;
32188 if (e->src == ENTRY_BLOCK_PTR)
32190 min_prev_count = 0;
32193 FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
32195 if (prev_e->src == ENTRY_BLOCK_PTR)
32197 int count = ix86_count_insn_bb (e->src);
32198 if (count < min_prev_count)
32199 min_prev_count = count;
32205 if (min_prev_count < 4)
32206 min_prev_count += ix86_count_insn_bb (bb);
32208 return min_prev_count;
32211 /* Pad short functions to 4 instructions. */
32214 ix86_pad_short_function (void)
32219 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
32221 rtx ret = BB_END (e->src);
32222 if (JUMP_P (ret) && ANY_RETURN_P (PATTERN (ret)))
32224 int insn_count = ix86_count_insn (e->src);
32226 /* Pad short function. */
32227 if (insn_count < 4)
32231 /* Find epilogue. */
32234 || NOTE_KIND (insn) != NOTE_INSN_EPILOGUE_BEG))
32235 insn = PREV_INSN (insn);
32240 /* Two NOPs count as one instruction. */
32241 insn_count = 2 * (4 - insn_count);
32242 emit_insn_before (gen_nops (GEN_INT (insn_count)), insn);
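/* Example: a function whose shortest entry path contains two real
   instructions gets insn_count == 2 * (4 - 2) == 4 NOPs emitted
   before the epilogue here; since two NOPs count as one instruction,
   those four NOPs supply the two missing instructions.  */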
32248 /* Implement machine-specific optimizations. We implement padding of returns
32249 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
32253 /* We are freeing block_for_insn in the toplev to keep compatibility
32254 with old MDEP_REORGS that are not CFG based. Recompute it now. */
32255 compute_bb_for_insn ();
32257 /* Run the vzeroupper optimization if needed. */
32258 if (TARGET_VZEROUPPER)
32259 move_or_delete_vzeroupper ();
32261 if (optimize && optimize_function_for_speed_p (cfun))
32263 if (TARGET_PAD_SHORT_FUNCTION)
32264 ix86_pad_short_function ();
32265 else if (TARGET_PAD_RETURNS)
32266 ix86_pad_returns ();
32267 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
32268 if (TARGET_FOUR_JUMP_LIMIT)
32269 ix86_avoid_jump_mispredicts ();
32274 /* Return nonzero when a QImode register that must be represented via a REX prefix is used. */
32277 x86_extended_QIreg_mentioned_p (rtx insn)
32280 extract_insn_cached (insn);
32281 for (i = 0; i < recog_data.n_operands; i++)
32282 if (REG_P (recog_data.operand[i])
32283 && REGNO (recog_data.operand[i]) > BX_REG)
32288 /* Return nonzero when P points to register encoded via REX prefix.
32289 Called via for_each_rtx. */
32291 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
32293 unsigned int regno;
32296 regno = REGNO (*p);
32297 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
32300 /* Return true when INSN mentions a register that must be encoded using a REX prefix. */
32303 x86_extended_reg_mentioned_p (rtx insn)
32305 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
32306 extended_reg_mentioned_1, NULL);
32309 /* If profitable, negate (without causing overflow) integer constant
32310 of mode MODE at location LOC. Return true in this case. */
32312 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
32316 if (!CONST_INT_P (*loc))
32322 /* DImode x86_64 constants must fit in 32 bits. */
32323 gcc_assert (x86_64_immediate_operand (*loc, mode));
32334 gcc_unreachable ();
32337 /* Avoid overflows. */
32338 if (mode_signbit_p (mode, *loc))
32341 val = INTVAL (*loc);
32343 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
32344 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
32345 if ((val < 0 && val != -128)
32348 *loc = GEN_INT (-val);
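/* E.g. (plus (reg) (const_int -4)) has its immediate rewritten to 4
   so that the caller can emit subl $4, %eax instead of
   addl $-4, %eax; -128 is deliberately left alone because it fits a
   sign-extended 8-bit immediate, while +128 would not.  */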
32355 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
32356 optabs would emit if we didn't have TFmode patterns. */
32359 x86_emit_floatuns (rtx operands[2])
32361 rtx neglab, donelab, i0, i1, f0, in, out;
32362 enum machine_mode mode, inmode;
32364 inmode = GET_MODE (operands[1]);
32365 gcc_assert (inmode == SImode || inmode == DImode);
32368 in = force_reg (inmode, operands[1]);
32369 mode = GET_MODE (out);
32370 neglab = gen_label_rtx ();
32371 donelab = gen_label_rtx ();
32372 f0 = gen_reg_rtx (mode);
32374 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
32376 expand_float (out, in, 0);
32378 emit_jump_insn (gen_jump (donelab));
32381 emit_label (neglab);
32383 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
32385 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
32387 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
32389 expand_float (f0, i0, 0);
32391 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
32393 emit_label (donelab);
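/* In effect, for an input IN with the sign bit set, the code above
   computes F0 = (FP) ((IN >> 1) | (IN & 1)) and returns F0 + F0;
   OR-ing in the shifted-out bit makes the halving round to odd, so
   the final doubling still produces a correctly rounded result.  */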
32396 /* AVX2 does support 32-byte integer vector operations,
32397 thus the longest vector we are faced with is V32QImode. */
32398 #define MAX_VECT_LEN 32
32400 struct expand_vec_perm_d
32402 rtx target, op0, op1;
32403 unsigned char perm[MAX_VECT_LEN];
32404 enum machine_mode vmode;
32405 unsigned char nelt;
32409 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
32410 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
32412 /* Get a vector mode of the same size as the original but with elements
32413 twice as wide. This is only guaranteed to apply to integral vectors. */
32415 static inline enum machine_mode
32416 get_mode_wider_vector (enum machine_mode o)
32418 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
32419 enum machine_mode n = GET_MODE_WIDER_MODE (o);
32420 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
32421 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
32425 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
32426 with all elements equal to VAR. Return true if successful. */
32429 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
32430 rtx target, rtx val)
32453 /* First attempt to recognize VAL as-is. */
32454 dup = gen_rtx_VEC_DUPLICATE (mode, val);
32455 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
32456 if (recog_memoized (insn) < 0)
32459 /* If that fails, force VAL into a register. */
32462 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
32463 seq = get_insns ();
32466 emit_insn_before (seq, insn);
32468 ok = recog_memoized (insn) >= 0;
32477 if (TARGET_SSE || TARGET_3DNOW_A)
32481 val = gen_lowpart (SImode, val);
32482 x = gen_rtx_TRUNCATE (HImode, val);
32483 x = gen_rtx_VEC_DUPLICATE (mode, x);
32484 emit_insn (gen_rtx_SET (VOIDmode, target, x));
32497 struct expand_vec_perm_d dperm;
32501 memset (&dperm, 0, sizeof (dperm));
32502 dperm.target = target;
32503 dperm.vmode = mode;
32504 dperm.nelt = GET_MODE_NUNITS (mode);
32505 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
32507 /* Extend to SImode using a paradoxical SUBREG. */
32508 tmp1 = gen_reg_rtx (SImode);
32509 emit_move_insn (tmp1, gen_lowpart (SImode, val));
32511 /* Insert the SImode value as low element of a V4SImode vector. */
32512 tmp2 = gen_lowpart (V4SImode, dperm.op0);
32513 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
32515 ok = (expand_vec_perm_1 (&dperm)
32516 || expand_vec_perm_broadcast_1 (&dperm));
32528 /* Replicate the value once into the next wider mode and recurse. */
32530 enum machine_mode smode, wsmode, wvmode;
32533 smode = GET_MODE_INNER (mode);
32534 wvmode = get_mode_wider_vector (mode);
32535 wsmode = GET_MODE_INNER (wvmode);
32537 val = convert_modes (wsmode, smode, val, true);
32538 x = expand_simple_binop (wsmode, ASHIFT, val,
32539 GEN_INT (GET_MODE_BITSIZE (smode)),
32540 NULL_RTX, 1, OPTAB_LIB_WIDEN);
32541 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
32543 x = gen_lowpart (wvmode, target);
32544 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
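/* Example of the widening step: broadcasting an HImode value X into
   V8HImode first forms the SImode value (X << 16) | X and then
   recurses to broadcast that into V4SImode, which the recursive call
   handles directly.  */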
32552 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
32553 rtx x = gen_reg_rtx (hvmode);
32555 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
32558 x = gen_rtx_VEC_CONCAT (mode, x, x);
32559 emit_insn (gen_rtx_SET (VOIDmode, target, x));
32568 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
32569 whose ONE_VAR element is VAR, and other elements are zero. Return true if successful. */
32573 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
32574 rtx target, rtx var, int one_var)
32576 enum machine_mode vsimode;
32579 bool use_vector_set = false;
32584 /* For SSE4.1, we normally use vector set. But if the second
32585 element is zero and inter-unit moves are OK, we use movq instead. */
32587 use_vector_set = (TARGET_64BIT
32589 && !(TARGET_INTER_UNIT_MOVES
32595 use_vector_set = TARGET_SSE4_1;
32598 use_vector_set = TARGET_SSE2;
32601 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
32608 use_vector_set = TARGET_AVX;
32611 /* Use ix86_expand_vector_set in 64bit mode only. */
32612 use_vector_set = TARGET_AVX && TARGET_64BIT;
32618 if (use_vector_set)
32620 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
32621 var = force_reg (GET_MODE_INNER (mode), var);
32622 ix86_expand_vector_set (mmx_ok, target, var, one_var);
32638 var = force_reg (GET_MODE_INNER (mode), var);
32639 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
32640 emit_insn (gen_rtx_SET (VOIDmode, target, x));
32645 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
32646 new_target = gen_reg_rtx (mode);
32648 new_target = target;
32649 var = force_reg (GET_MODE_INNER (mode), var);
32650 x = gen_rtx_VEC_DUPLICATE (mode, var);
32651 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
32652 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
32655 /* We need to shuffle the value to the correct position, so
32656 create a new pseudo to store the intermediate result. */
32658 /* With SSE2, we can use the integer shuffle insns. */
32659 if (mode != V4SFmode && TARGET_SSE2)
32661 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
32663 GEN_INT (one_var == 1 ? 0 : 1),
32664 GEN_INT (one_var == 2 ? 0 : 1),
32665 GEN_INT (one_var == 3 ? 0 : 1)));
32666 if (target != new_target)
32667 emit_move_insn (target, new_target);
32671 /* Otherwise convert the intermediate result to V4SFmode and
32672 use the SSE1 shuffle instructions. */
32673 if (mode != V4SFmode)
32675 tmp = gen_reg_rtx (V4SFmode);
32676 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
32681 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
32683 GEN_INT (one_var == 1 ? 0 : 1),
32684 GEN_INT (one_var == 2 ? 0+4 : 1+4),
32685 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
32687 if (mode != V4SFmode)
32688 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
32689 else if (tmp != target)
32690 emit_move_insn (target, tmp);
32692 else if (target != new_target)
32693 emit_move_insn (target, new_target);
32698 vsimode = V4SImode;
32704 vsimode = V2SImode;
32710 /* Zero extend the variable element to SImode and recurse. */
32711 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
32713 x = gen_reg_rtx (vsimode);
32714 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
32716 gcc_unreachable ();
32718 emit_move_insn (target, gen_lowpart (mode, x));
32726 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
32727 consisting of the values in VALS. It is known that all elements
32728 except ONE_VAR are constants. Return true if successful. */
32731 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
32732 rtx target, rtx vals, int one_var)
32734 rtx var = XVECEXP (vals, 0, one_var);
32735 enum machine_mode wmode;
32738 const_vec = copy_rtx (vals);
32739 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
32740 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
32748 /* For the two element vectors, it's just as easy to use
32749 the general case. */
32753 /* Use ix86_expand_vector_set in 64bit mode only. */
32776 /* There's no way to set one QImode entry easily. Combine
32777 the variable value with its adjacent constant value, and
32778 promote to an HImode set. */
32779 x = XVECEXP (vals, 0, one_var ^ 1);
32782 var = convert_modes (HImode, QImode, var, true);
32783 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
32784 NULL_RTX, 1, OPTAB_LIB_WIDEN);
32785 x = GEN_INT (INTVAL (x) & 0xff);
32789 var = convert_modes (HImode, QImode, var, true);
32790 x = gen_int_mode (INTVAL (x) << 8, HImode);
32792 if (x != const0_rtx)
32793 var = expand_simple_binop (HImode, IOR, var, x, var,
32794 1, OPTAB_LIB_WIDEN);
32796 x = gen_reg_rtx (wmode);
32797 emit_move_insn (x, gen_lowpart (wmode, const_vec));
32798 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
32800 emit_move_insn (target, gen_lowpart (mode, x));
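/* Example for the combining trick above: to vary element 5 of a
   V16QImode vector, the variable byte is merged with the constant
   byte at element 4 into a single HImode value, which is then stored
   as element 2 (one_var >> 1) of the V8HImode view of the vector.  */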
32807 emit_move_insn (target, const_vec);
32808 ix86_expand_vector_set (mmx_ok, target, var, one_var);
32812 /* A subroutine of ix86_expand_vector_init_general. Use vector
32813 concatenate to handle the most general case: all values variable,
32814 and none identical. */
32817 ix86_expand_vector_init_concat (enum machine_mode mode,
32818 rtx target, rtx *ops, int n)
32820 enum machine_mode cmode, hmode = VOIDmode;
32821 rtx first[8], second[4];
32861 gcc_unreachable ();
32864 if (!register_operand (ops[1], cmode))
32865 ops[1] = force_reg (cmode, ops[1]);
32866 if (!register_operand (ops[0], cmode))
32867 ops[0] = force_reg (cmode, ops[0]);
32868 emit_insn (gen_rtx_SET (VOIDmode, target,
32869 gen_rtx_VEC_CONCAT (mode, ops[0],
32889 gcc_unreachable ();
32905 gcc_unreachable ();
32910 /* FIXME: We process inputs backward to help RA. PR 36222. */
32913 for (; i > 0; i -= 2, j--)
32915 first[j] = gen_reg_rtx (cmode);
32916 v = gen_rtvec (2, ops[i - 1], ops[i]);
32917 ix86_expand_vector_init (false, first[j],
32918 gen_rtx_PARALLEL (cmode, v));
32924 gcc_assert (hmode != VOIDmode);
32925 for (i = j = 0; i < n; i += 2, j++)
32927 second[j] = gen_reg_rtx (hmode);
32928 ix86_expand_vector_init_concat (hmode, second [j],
32932 ix86_expand_vector_init_concat (mode, target, second, n);
32935 ix86_expand_vector_init_concat (mode, target, first, n);
32939 gcc_unreachable ();
32943 /* A subroutine of ix86_expand_vector_init_general. Use vector
32944 interleave to handle the most general case: all values variable,
32945 and none identical. */
32948 ix86_expand_vector_init_interleave (enum machine_mode mode,
32949 rtx target, rtx *ops, int n)
32951 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
32954 rtx (*gen_load_even) (rtx, rtx, rtx);
32955 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
32956 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
32961 gen_load_even = gen_vec_setv8hi;
32962 gen_interleave_first_low = gen_vec_interleave_lowv4si;
32963 gen_interleave_second_low = gen_vec_interleave_lowv2di;
32964 inner_mode = HImode;
32965 first_imode = V4SImode;
32966 second_imode = V2DImode;
32967 third_imode = VOIDmode;
32970 gen_load_even = gen_vec_setv16qi;
32971 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
32972 gen_interleave_second_low = gen_vec_interleave_lowv4si;
32973 inner_mode = QImode;
32974 first_imode = V8HImode;
32975 second_imode = V4SImode;
32976 third_imode = V2DImode;
32979 gcc_unreachable ();
32982 for (i = 0; i < n; i++)
32984 /* Extend the odd element to SImode using a paradoxical SUBREG. */
32985 op0 = gen_reg_rtx (SImode);
32986 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
32988 /* Insert the SImode value as low element of V4SImode vector. */
32989 op1 = gen_reg_rtx (V4SImode);
32990 op0 = gen_rtx_VEC_MERGE (V4SImode,
32991 gen_rtx_VEC_DUPLICATE (V4SImode,
32993 CONST0_RTX (V4SImode),
32995 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
32997 /* Cast the V4SImode vector back to a vector in the original mode. */
32998 op0 = gen_reg_rtx (mode);
32999 emit_move_insn (op0, gen_lowpart (mode, op1));
33001 /* Load even elements into the second position. */
33002 emit_insn (gen_load_even (op0,
33003 force_reg (inner_mode,
33007 /* Cast vector to FIRST_IMODE vector. */
33008 ops[i] = gen_reg_rtx (first_imode);
33009 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
33012 /* Interleave low FIRST_IMODE vectors. */
33013 for (i = j = 0; i < n; i += 2, j++)
33015 op0 = gen_reg_rtx (first_imode);
33016 emit_insn (gen_interleave_first_low (op0, ops[i], ops[i + 1]));
33018 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
33019 ops[j] = gen_reg_rtx (second_imode);
33020 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
33023 /* Interleave low SECOND_IMODE vectors. */
33024 switch (second_imode)
33027 for (i = j = 0; i < n / 2; i += 2, j++)
33029 op0 = gen_reg_rtx (second_imode);
33030 emit_insn (gen_interleave_second_low (op0, ops[i],
33033 /* Cast the SECOND_IMODE vector to the THIRD_IMODE vector. */
33035 ops[j] = gen_reg_rtx (third_imode);
33036 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
33038 second_imode = V2DImode;
33039 gen_interleave_second_low = gen_vec_interleave_lowv2di;
33043 op0 = gen_reg_rtx (second_imode);
33044 emit_insn (gen_interleave_second_low (op0, ops[0],
33047 /* Cast the SECOND_IMODE vector back to a vector in the original mode. */
33049 emit_insn (gen_rtx_SET (VOIDmode, target,
33050 gen_lowpart (mode, op0)));
33054 gcc_unreachable ();
33058 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
33059 all values variable, and none identical. */
33062 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
33063 rtx target, rtx vals)
33065 rtx ops[32], op0, op1;
33066 enum machine_mode half_mode = VOIDmode;
33073 if (!mmx_ok && !TARGET_SSE)
33085 n = GET_MODE_NUNITS (mode);
33086 for (i = 0; i < n; i++)
33087 ops[i] = XVECEXP (vals, 0, i);
33088 ix86_expand_vector_init_concat (mode, target, ops, n);
33092 half_mode = V16QImode;
33096 half_mode = V8HImode;
33100 n = GET_MODE_NUNITS (mode);
33101 for (i = 0; i < n; i++)
33102 ops[i] = XVECEXP (vals, 0, i);
33103 op0 = gen_reg_rtx (half_mode);
33104 op1 = gen_reg_rtx (half_mode);
33105 ix86_expand_vector_init_interleave (half_mode, op0, ops,
33107 ix86_expand_vector_init_interleave (half_mode, op1,
33108 &ops [n >> 1], n >> 2);
33109 emit_insn (gen_rtx_SET (VOIDmode, target,
33110 gen_rtx_VEC_CONCAT (mode, op0, op1)));
33114 if (!TARGET_SSE4_1)
33122 /* Don't use ix86_expand_vector_init_interleave if we can't
33123 move from GPR to SSE register directly. */
33124 if (!TARGET_INTER_UNIT_MOVES)
33127 n = GET_MODE_NUNITS (mode);
33128 for (i = 0; i < n; i++)
33129 ops[i] = XVECEXP (vals, 0, i);
33130 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
33138 gcc_unreachable ();
33142 int i, j, n_elts, n_words, n_elt_per_word;
33143 enum machine_mode inner_mode;
33144 rtx words[4], shift;
33146 inner_mode = GET_MODE_INNER (mode);
33147 n_elts = GET_MODE_NUNITS (mode);
33148 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
33149 n_elt_per_word = n_elts / n_words;
33150 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
33152 for (i = 0; i < n_words; ++i)
33154 rtx word = NULL_RTX;
33156 for (j = 0; j < n_elt_per_word; ++j)
33158 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
33159 elt = convert_modes (word_mode, inner_mode, elt, true);
33165 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
33166 word, 1, OPTAB_LIB_WIDEN);
33167 word = expand_simple_binop (word_mode, IOR, word, elt,
33168 word, 1, OPTAB_LIB_WIDEN);
33176 emit_move_insn (target, gen_lowpart (mode, words[0]));
33177 else if (n_words == 2)
33179 rtx tmp = gen_reg_rtx (mode);
33180 emit_clobber (tmp);
33181 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
33182 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
33183 emit_move_insn (target, tmp);
33185 else if (n_words == 4)
33187 rtx tmp = gen_reg_rtx (V4SImode);
33188 gcc_assert (word_mode == SImode);
33189 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
33190 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
33191 emit_move_insn (target, gen_lowpart (mode, tmp));
33194 gcc_unreachable ();
33198 /* Initialize vector TARGET via VALS. Suppress the use of MMX
33199 instructions unless MMX_OK is true. */
33202 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
33204 enum machine_mode mode = GET_MODE (target);
33205 enum machine_mode inner_mode = GET_MODE_INNER (mode);
33206 int n_elts = GET_MODE_NUNITS (mode);
33207 int n_var = 0, one_var = -1;
33208 bool all_same = true, all_const_zero = true;
33212 for (i = 0; i < n_elts; ++i)
33214 x = XVECEXP (vals, 0, i);
33215 if (!(CONST_INT_P (x)
33216 || GET_CODE (x) == CONST_DOUBLE
33217 || GET_CODE (x) == CONST_FIXED))
33218 n_var++, one_var = i;
33219 else if (x != CONST0_RTX (inner_mode))
33220 all_const_zero = false;
33221 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
33225 /* Constants are best loaded from the constant pool. */
33228 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
33232 /* If all values are identical, broadcast the value. */
33234 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
33235 XVECEXP (vals, 0, 0)))
33238 /* Values where only one field is non-constant are best loaded from
33239 the pool and overwritten via move later. */
33243 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
33244 XVECEXP (vals, 0, one_var),
33248 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
33252 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
33256 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
33258 enum machine_mode mode = GET_MODE (target);
33259 enum machine_mode inner_mode = GET_MODE_INNER (mode);
33260 enum machine_mode half_mode;
33261 bool use_vec_merge = false;
33263 static rtx (*gen_extract[6][2]) (rtx, rtx)
33265 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
33266 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
33267 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
33268 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
33269 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
33270 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
33272 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
33274 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
33275 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
33276 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
33277 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
33278 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
33279 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
33289 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
33290 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
33292 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
33294 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
33295 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
33301 use_vec_merge = TARGET_SSE4_1 && TARGET_64BIT;
33305 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
33306 ix86_expand_vector_extract (false, tmp, target, 1 - elt);
33308 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
33310 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
33311 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
33318 /* For the two element vectors, we implement a VEC_CONCAT with
33319 the extraction of the other element. */
33321 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
33322 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
33325 op0 = val, op1 = tmp;
33327 op0 = tmp, op1 = val;
33329 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
33330 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
33335 use_vec_merge = TARGET_SSE4_1;
33342 use_vec_merge = true;
33346 /* tmp = target = A B C D */
33347 tmp = copy_to_reg (target);
33348 /* target = A A B B */
33349 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
33350 /* target = X A B B */
33351 ix86_expand_vector_set (false, target, val, 0);
33352 /* target = A X C D */
33353 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
33354 const1_rtx, const0_rtx,
33355 GEN_INT (2+4), GEN_INT (3+4)));
33359 /* tmp = target = A B C D */
33360 tmp = copy_to_reg (target);
33361 /* tmp = X B C D */
33362 ix86_expand_vector_set (false, tmp, val, 0);
33363 /* target = A B X D */
33364 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
33365 const0_rtx, const1_rtx,
33366 GEN_INT (0+4), GEN_INT (3+4)));
33370 /* tmp = target = A B C D */
33371 tmp = copy_to_reg (target);
33372 /* tmp = X B C D */
33373 ix86_expand_vector_set (false, tmp, val, 0);
33374 /* target = A B C X */
33375 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
33376 const0_rtx, const1_rtx,
33377 GEN_INT (2+4), GEN_INT (0+4)));
33381 gcc_unreachable ();
33386 use_vec_merge = TARGET_SSE4_1;
33390 /* Element 0 handled by vec_merge below. */
33393 use_vec_merge = true;
33399 /* With SSE2, use integer shuffles to swap element 0 and ELT,
33400 store into element 0, then shuffle them back. */
33404 order[0] = GEN_INT (elt);
33405 order[1] = const1_rtx;
33406 order[2] = const2_rtx;
33407 order[3] = GEN_INT (3);
33408 order[elt] = const0_rtx;
33410 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
33411 order[1], order[2], order[3]));
33413 ix86_expand_vector_set (false, target, val, 0);
33415 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
33416 order[1], order[2], order[3]));
33420 /* For SSE1, we have to reuse the V4SF code. */
33421 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
33422 gen_lowpart (SFmode, val), elt);
33427 use_vec_merge = TARGET_SSE2;
33430 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
33434 use_vec_merge = TARGET_SSE4_1;
33441 half_mode = V16QImode;
33447 half_mode = V8HImode;
33453 half_mode = V4SImode;
33459 half_mode = V2DImode;
33465 half_mode = V4SFmode;
33471 half_mode = V2DFmode;
33477 /* Compute offset. */
33481 gcc_assert (i <= 1);
33483 /* Extract the half. */
33484 tmp = gen_reg_rtx (half_mode);
33485 emit_insn (gen_extract[j][i] (tmp, target));
33487 /* Put val in tmp at elt. */
33488 ix86_expand_vector_set (false, tmp, val, elt);
33491 emit_insn (gen_insert[j][i] (target, target, tmp));
33500 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
33501 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
33502 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
33506 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
33508 emit_move_insn (mem, target);
33510 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
33511 emit_move_insn (tmp, val);
33513 emit_move_insn (target, mem);
33518 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
33520 enum machine_mode mode = GET_MODE (vec);
33521 enum machine_mode inner_mode = GET_MODE_INNER (mode);
33522 bool use_vec_extr = false;
33535 use_vec_extr = true;
33539 use_vec_extr = TARGET_SSE4_1;
33551 tmp = gen_reg_rtx (mode);
33552 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
33553 GEN_INT (elt), GEN_INT (elt),
33554 GEN_INT (elt+4), GEN_INT (elt+4)));
33558 tmp = gen_reg_rtx (mode);
33559 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
33563 gcc_unreachable ();
33566 use_vec_extr = true;
33571 use_vec_extr = TARGET_SSE4_1;
33585 tmp = gen_reg_rtx (mode);
33586 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
33587 GEN_INT (elt), GEN_INT (elt),
33588 GEN_INT (elt), GEN_INT (elt)));
33592 tmp = gen_reg_rtx (mode);
33593 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
33597 gcc_unreachable ();
33600 use_vec_extr = true;
33605 /* For SSE1, we have to reuse the V4SF code. */
33606 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
33607 gen_lowpart (V4SFmode, vec), elt);
33613 use_vec_extr = TARGET_SSE2;
33616 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
33620 use_vec_extr = TARGET_SSE4_1;
33626 tmp = gen_reg_rtx (V4SFmode);
33628 emit_insn (gen_vec_extract_lo_v8sf (tmp, vec));
33630 emit_insn (gen_vec_extract_hi_v8sf (tmp, vec));
33631 ix86_expand_vector_extract (false, target, tmp, elt & 3);
33639 tmp = gen_reg_rtx (V2DFmode);
33641 emit_insn (gen_vec_extract_lo_v4df (tmp, vec));
33643 emit_insn (gen_vec_extract_hi_v4df (tmp, vec));
33644 ix86_expand_vector_extract (false, target, tmp, elt & 1);
33652 tmp = gen_reg_rtx (V16QImode);
33654 emit_insn (gen_vec_extract_lo_v32qi (tmp, vec));
33656 emit_insn (gen_vec_extract_hi_v32qi (tmp, vec));
33657 ix86_expand_vector_extract (false, target, tmp, elt & 15);
33665 tmp = gen_reg_rtx (V8HImode);
33667 emit_insn (gen_vec_extract_lo_v16hi (tmp, vec));
33669 emit_insn (gen_vec_extract_hi_v16hi (tmp, vec));
33670 ix86_expand_vector_extract (false, target, tmp, elt & 7);
33678 tmp = gen_reg_rtx (V4SImode);
33680 emit_insn (gen_vec_extract_lo_v8si (tmp, vec));
33682 emit_insn (gen_vec_extract_hi_v8si (tmp, vec));
33683 ix86_expand_vector_extract (false, target, tmp, elt & 3);
33691 tmp = gen_reg_rtx (V2DImode);
33693 emit_insn (gen_vec_extract_lo_v4di (tmp, vec));
33695 emit_insn (gen_vec_extract_hi_v4di (tmp, vec));
33696 ix86_expand_vector_extract (false, target, tmp, elt & 1);
33702 /* ??? Could extract the appropriate HImode element and shift. */
33709 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
33710 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
33712 /* Let the rtl optimizers know about the zero extension performed. */
33713 if (inner_mode == QImode || inner_mode == HImode)
33715 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
33716 target = gen_lowpart (SImode, target);
33719 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
33723 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
33725 emit_move_insn (mem, vec);
33727 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
33728 emit_move_insn (target, tmp);
33732 /* Generate code to copy vector bits i / 2 ... i - 1 from vector SRC
33733 to bits 0 ... i / 2 - 1 of vector DEST, which has the same mode.
33734 The upper bits of DEST are undefined, though they shouldn't cause
33735 exceptions (some bits from src or all zeros are ok). */
33738 emit_reduc_half (rtx dest, rtx src, int i)
33741 switch (GET_MODE (src))
33745 tem = gen_sse_movhlps (dest, src, src);
33747 tem = gen_sse_shufps_v4sf (dest, src, src, const1_rtx, const1_rtx,
33748 GEN_INT (1 + 4), GEN_INT (1 + 4));
33751 tem = gen_vec_interleave_highv2df (dest, src, src);
33757 tem = gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, dest),
33758 gen_lowpart (V1TImode, src),
33763 tem = gen_avx_vperm2f128v8sf3 (dest, src, src, const1_rtx);
33765 tem = gen_avx_shufps256 (dest, src, src,
33766 GEN_INT (i == 128 ? 2 + (3 << 2) : 1));
33770 tem = gen_avx_vperm2f128v4df3 (dest, src, src, const1_rtx);
33772 tem = gen_avx_shufpd256 (dest, src, src, const1_rtx);
33779 tem = gen_avx2_permv2ti (gen_lowpart (V4DImode, dest),
33780 gen_lowpart (V4DImode, src),
33781 gen_lowpart (V4DImode, src),
33784 tem = gen_avx2_lshrv2ti3 (gen_lowpart (V2TImode, dest),
33785 gen_lowpart (V2TImode, src),
33789 gcc_unreachable ();
33794 /* Expand a vector reduction. FN is the binary pattern to reduce;
33795 DEST is the destination; IN is the input vector. */
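/* For example, for V4SFmode and fn == gen_addv4sf3 the loop below runs
   twice (i = 128, then 64): {a,b,c,d} first becomes {a+c,b+d,?,?} and
   then {a+b+c+d,?,?,?}, so the reduced value lands in element 0 of the
   final destination.  */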
33798 ix86_expand_reduc (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
33800 rtx half, dst, vec = in;
33801 enum machine_mode mode = GET_MODE (in);
33804 /* SSE4 has a special instruction for V8HImode UMIN reduction. */
33806 && mode == V8HImode
33807 && fn == gen_uminv8hi3)
33809 emit_insn (gen_sse4_1_phminposuw (dest, in));
33813 for (i = GET_MODE_BITSIZE (mode);
33814 i > GET_MODE_BITSIZE (GET_MODE_INNER (mode));
33817 half = gen_reg_rtx (mode);
33818 emit_reduc_half (half, vec, i);
33819 if (i == GET_MODE_BITSIZE (GET_MODE_INNER (mode)) * 2)
33822 dst = gen_reg_rtx (mode);
33823 emit_insn (fn (dst, half, vec));
33828 /* Target hook for scalar_mode_supported_p. */
33830 ix86_scalar_mode_supported_p (enum machine_mode mode)
33832 if (DECIMAL_FLOAT_MODE_P (mode))
33833 return default_decimal_float_supported_p ();
33834 else if (mode == TFmode)
33837 return default_scalar_mode_supported_p (mode);
33840 /* Implements target hook vector_mode_supported_p. */
33842 ix86_vector_mode_supported_p (enum machine_mode mode)
33844 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
33846 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
33848 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
33850 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
33852 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
33857 /* Target hook for c_mode_for_suffix. */
33858 static enum machine_mode
33859 ix86_c_mode_for_suffix (char suffix)
33869 /* Worker function for TARGET_MD_ASM_CLOBBERS.
33871 We do this in the new i386 backend to maintain source compatibility
33872 with the old cc0-based compiler. */
33875 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
33876 tree inputs ATTRIBUTE_UNUSED,
33879 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
33881 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
33886 /* Implements the target hook targetm.encode_section_info. */
33888 static void ATTRIBUTE_UNUSED
33889 ix86_encode_section_info (tree decl, rtx rtl, int first)
33891 default_encode_section_info (decl, rtl, first);
33893 if (TREE_CODE (decl) == VAR_DECL
33894 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
33895 && ix86_in_large_data_p (decl))
33896 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
33899 /* Worker function for REVERSE_CONDITION. */
33902 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
33904 return (mode != CCFPmode && mode != CCFPUmode
33905 ? reverse_condition (code)
33906 : reverse_condition_maybe_unordered (code));
33909 /* Output code to perform an x87 FP register move, from OPERANDS[1]
to OPERANDS[0].  */
33913 output_387_reg_move (rtx insn, rtx *operands)
33915 if (REG_P (operands[0]))
33917 if (REG_P (operands[1])
33918 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
33920 if (REGNO (operands[0]) == FIRST_STACK_REG)
33921 return output_387_ffreep (operands, 0);
33922 return "fstp\t%y0";
33924 if (STACK_TOP_P (operands[0]))
33925 return "fld%Z1\t%y1";
33928 else if (MEM_P (operands[0]))
33930 gcc_assert (REG_P (operands[1]));
33931 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
33932 return "fstp%Z0\t%y0";
33935 /* There is no non-popping store to memory for XFmode.
33936 So if we need one, follow the store with a load. */
33937 if (GET_MODE (operands[0]) == XFmode)
33938 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
33940 return "fst%Z0\t%y0";
33947 /* Output code to perform a conditional jump to LABEL, if C2 flag in
33948 FP status register is set. */
33951 ix86_emit_fp_unordered_jump (rtx label)
33953 rtx reg = gen_reg_rtx (HImode);
33956 emit_insn (gen_x86_fnstsw_1 (reg));
33958 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
33960 emit_insn (gen_x86_sahf_1 (reg));
33962 temp = gen_rtx_REG (CCmode, FLAGS_REG);
33963 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
33967 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
33969 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
33970 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
33973 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
33974 gen_rtx_LABEL_REF (VOIDmode, label),
33976 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
33978 emit_jump_insn (temp);
33979 predict_jump (REG_BR_PROB_BASE * 10 / 100);
33982 /* Output code to perform a log1p XFmode calculation. */
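/* The expansion below relies on the x87 fyl2xp1 instruction, which
   computes y * log2(x + 1) but is only specified for
   |x| < 1 - sqrt(2)/2; the magic constant compared against below is
   exactly that bound (~0.2929).  For larger |x| we fall back to fyl2x
   on 1.0 + x, where the explicit addition loses little precision
   because the argument is far from zero.  */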
33984 void ix86_emit_i387_log1p (rtx op0, rtx op1)
33986 rtx label1 = gen_label_rtx ();
33987 rtx label2 = gen_label_rtx ();
33989 rtx tmp = gen_reg_rtx (XFmode);
33990 rtx tmp2 = gen_reg_rtx (XFmode);
33993 emit_insn (gen_absxf2 (tmp, op1));
33994 test = gen_rtx_GE (VOIDmode, tmp,
33995 CONST_DOUBLE_FROM_REAL_VALUE (
33996 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
33998 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
34000 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
34001 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
34002 emit_jump (label2);
34004 emit_label (label1);
34005 emit_move_insn (tmp, CONST1_RTX (XFmode));
34006 emit_insn (gen_addxf3 (tmp, op1, tmp));
34007 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
34008 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
34010 emit_label (label2);
34013 /* Emit x87 code for the round calculation: round OP1 to the nearest
integer, halfway cases away from zero, and store the result in OP0. */
34014 void ix86_emit_i387_round (rtx op0, rtx op1)
34016 enum machine_mode inmode = GET_MODE (op1);
34017 enum machine_mode outmode = GET_MODE (op0);
34018 rtx e1, e2, res, tmp, tmp1, half;
34019 rtx scratch = gen_reg_rtx (HImode);
34020 rtx flags = gen_rtx_REG (CCNOmode, FLAGS_REG);
34021 rtx jump_label = gen_label_rtx ();
34023 rtx (*gen_abs) (rtx, rtx);
34024 rtx (*gen_neg) (rtx, rtx);
34029 gen_abs = gen_abssf2;
34032 gen_abs = gen_absdf2;
34035 gen_abs = gen_absxf2;
34038 gcc_unreachable ();
34044 gen_neg = gen_negsf2;
34047 gen_neg = gen_negdf2;
34050 gen_neg = gen_negxf2;
34053 gen_neg = gen_neghi2;
34056 gen_neg = gen_negsi2;
34059 gen_neg = gen_negdi2;
34062 gcc_unreachable ();
34065 e1 = gen_reg_rtx (inmode);
34066 e2 = gen_reg_rtx (inmode);
34067 res = gen_reg_rtx (outmode);
34069 half = CONST_DOUBLE_FROM_REAL_VALUE (dconsthalf, inmode);
34071 /* round(a) = sgn(a) * floor(fabs(a) + 0.5) */
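/* Worked example of the identity above (halfway cases round away from
   zero): round(2.5) = 1 * floor(2.5 + 0.5) = 3 and
   round(-2.5) = -1 * floor(2.5 + 0.5) = -3.  */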
34073 /* scratch = fxam(op1) */
34074 emit_insn (gen_rtx_SET (VOIDmode, scratch,
34075 gen_rtx_UNSPEC (HImode, gen_rtvec (1, op1),
34077 /* e1 = fabs(op1) */
34078 emit_insn (gen_abs (e1, op1));
34080 /* e2 = e1 + 0.5 */
34081 half = force_reg (inmode, half);
34082 emit_insn (gen_rtx_SET (VOIDmode, e2,
34083 gen_rtx_PLUS (inmode, e1, half)));
34085 /* res = floor(e2) */
34086 if (inmode != XFmode)
34088 tmp1 = gen_reg_rtx (XFmode);
34090 emit_insn (gen_rtx_SET (VOIDmode, tmp1,
34091 gen_rtx_FLOAT_EXTEND (XFmode, e2)));
34101 rtx tmp0 = gen_reg_rtx (XFmode);
34103 emit_insn (gen_frndintxf2_floor (tmp0, tmp1));
34105 emit_insn (gen_rtx_SET (VOIDmode, res,
34106 gen_rtx_UNSPEC (outmode, gen_rtvec (1, tmp0),
34107 UNSPEC_TRUNC_NOOP)));
34111 emit_insn (gen_frndintxf2_floor (res, tmp1));
34114 emit_insn (gen_lfloorxfhi2 (res, tmp1));
34117 emit_insn (gen_lfloorxfsi2 (res, tmp1));
34120 emit_insn (gen_lfloorxfdi2 (res, tmp1));
34123 gcc_unreachable ();
34126 /* flags = signbit(a) */
34127 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x02)));
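/* fxam stores the sign of the operand in condition flag C1, which
   fnstsw leaves in bit 9 of the status word, i.e. bit 1 (0x02) of the
   high byte tested above.  */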
34129 /* if (flags) then res = -res */
34130 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode,
34131 gen_rtx_EQ (VOIDmode, flags, const0_rtx),
34132 gen_rtx_LABEL_REF (VOIDmode, jump_label),
34134 insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
34135 predict_jump (REG_BR_PROB_BASE * 50 / 100);
34136 JUMP_LABEL (insn) = jump_label;
34138 emit_insn (gen_neg (res, res));
34140 emit_label (jump_label);
34141 LABEL_NUSES (jump_label) = 1;
34143 emit_move_insn (op0, res);
34146 /* Output code to perform a Newton-Raphson approximation of a single precision
34147 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
34149 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
34151 rtx x0, x1, e0, e1;
34153 x0 = gen_reg_rtx (mode);
34154 e0 = gen_reg_rtx (mode);
34155 e1 = gen_reg_rtx (mode);
34156 x1 = gen_reg_rtx (mode);
34158 /* a / b = a * ((rcp(b) + rcp(b)) - (b * rcp(b) * rcp(b))) */
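/* This is one Newton-Raphson step for f(x) = 1/x - b, whose iteration
   is x1 = x0 * (2 - b * x0) = (x0 + x0) - (b * x0 * x0); e0 below
   accumulates b * x0 * x0 and e1 accumulates x0 + x0.  */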
34160 b = force_reg (mode, b);
34162 /* x0 = rcp(b) estimate */
34163 emit_insn (gen_rtx_SET (VOIDmode, x0,
34164 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
34167 emit_insn (gen_rtx_SET (VOIDmode, e0,
34168 gen_rtx_MULT (mode, x0, b)));
34171 emit_insn (gen_rtx_SET (VOIDmode, e0,
34172 gen_rtx_MULT (mode, x0, e0)));
34175 emit_insn (gen_rtx_SET (VOIDmode, e1,
34176 gen_rtx_PLUS (mode, x0, x0)));
34179 emit_insn (gen_rtx_SET (VOIDmode, x1,
34180 gen_rtx_MINUS (mode, e1, e0)));
34183 emit_insn (gen_rtx_SET (VOIDmode, res,
34184 gen_rtx_MULT (mode, a, x1)));
34187 /* Output code to perform a Newton-Raphson approximation of a
34188 single precision floating point [reciprocal] square root. */
34190 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
34193 rtx x0, e0, e1, e2, e3, mthree, mhalf;
34196 x0 = gen_reg_rtx (mode);
34197 e0 = gen_reg_rtx (mode);
34198 e1 = gen_reg_rtx (mode);
34199 e2 = gen_reg_rtx (mode);
34200 e3 = gen_reg_rtx (mode);
34202 real_from_integer (&r, VOIDmode, -3, -1, 0);
34203 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
34205 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
34206 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
34208 if (VECTOR_MODE_P (mode))
34210 mthree = ix86_build_const_vector (mode, true, mthree);
34211 mhalf = ix86_build_const_vector (mode, true, mhalf);
34214 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
34215 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
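/* Both identities are one Newton-Raphson step for f(x) = 1/x**2 - a,
   whose iteration is x1 = 0.5 * x0 * (3 - a * x0 * x0)
   = -0.5 * x0 * (a * x0 * x0 - 3.0); for the sqrt form the extra
   factor of a enters through a * rsqrtss(a) ~= sqrt(a).  */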
34217 a = force_reg (mode, a);
34219 /* x0 = rsqrt(a) estimate */
34220 emit_insn (gen_rtx_SET (VOIDmode, x0,
34221 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
34224 /* If a == 0.0, mask out the infinite rsqrt estimate so that
sqrt(0.0) yields 0.0 instead of 0.0 * inf = NaN. */
34229 zero = gen_reg_rtx (mode);
34230 mask = gen_reg_rtx (mode);
34232 zero = force_reg (mode, CONST0_RTX(mode));
34233 emit_insn (gen_rtx_SET (VOIDmode, mask,
34234 gen_rtx_NE (mode, zero, a)));
34236 emit_insn (gen_rtx_SET (VOIDmode, x0,
34237 gen_rtx_AND (mode, x0, mask)));
34241 emit_insn (gen_rtx_SET (VOIDmode, e0,
34242 gen_rtx_MULT (mode, x0, a)));
34244 emit_insn (gen_rtx_SET (VOIDmode, e1,
34245 gen_rtx_MULT (mode, e0, x0)));
34248 mthree = force_reg (mode, mthree);
34249 emit_insn (gen_rtx_SET (VOIDmode, e2,
34250 gen_rtx_PLUS (mode, e1, mthree)));
34252 mhalf = force_reg (mode, mhalf);
34254 /* e3 = -.5 * x0 */
34255 emit_insn (gen_rtx_SET (VOIDmode, e3,
34256 gen_rtx_MULT (mode, x0, mhalf)));
34258 /* e3 = -.5 * e0 */
34259 emit_insn (gen_rtx_SET (VOIDmode, e3,
34260 gen_rtx_MULT (mode, e0, mhalf)));
34261 /* ret = e2 * e3 */
34262 emit_insn (gen_rtx_SET (VOIDmode, res,
34263 gen_rtx_MULT (mode, e2, e3)));
34266 #ifdef TARGET_SOLARIS
34267 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
34270 i386_solaris_elf_named_section (const char *name, unsigned int flags,
34273 /* With Binutils 2.15, the "@unwind" marker must be specified on
34274 every occurrence of the ".eh_frame" section, not just the first
one.  */
if (TARGET_64BIT
34277 && strcmp (name, ".eh_frame") == 0)
34279 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
34280 flags & SECTION_WRITE ? "aw" : "a");
34285 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
34287 solaris_elf_asm_comdat_section (name, flags, decl);
34292 default_elf_asm_named_section (name, flags, decl);
34294 #endif /* TARGET_SOLARIS */
34296 /* Return the mangling of TYPE if it is an extended fundamental type. */
34298 static const char *
34299 ix86_mangle_type (const_tree type)
34301 type = TYPE_MAIN_VARIANT (type);
34303 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
34304 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
34307 switch (TYPE_MODE (type))
34310 /* __float128 is "g". */
34313 /* "long double" or __float80 is "e". */
34320 /* For 32-bit code we can save PIC register setup by using
34321 __stack_chk_fail_local hidden function instead of calling
34322 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
34323 register, so it is better to call __stack_chk_fail directly. */
34325 static tree ATTRIBUTE_UNUSED
34326 ix86_stack_protect_fail (void)
34328 return TARGET_64BIT
34329 ? default_external_stack_protect_fail ()
34330 : default_hidden_stack_protect_fail ();
34333 /* Select a format to encode pointers in exception handling data. CODE
34334 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
34335 true if the symbol may be affected by dynamic relocations.
34337 ??? All x86 object file formats are capable of representing this.
34338 After all, the relocation needed is the same as for the call insn.
34339 Whether or not a particular assembler allows us to enter such, I
34340 guess we'll have to see. */
34342 asm_preferred_eh_data_format (int code, int global)
34346 int type = DW_EH_PE_sdata8;
if (!TARGET_64BIT
34348 || ix86_cmodel == CM_SMALL_PIC
34349 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
34350 type = DW_EH_PE_sdata4;
34351 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
34353 if (ix86_cmodel == CM_SMALL
34354 || (ix86_cmodel == CM_MEDIUM && code))
34355 return DW_EH_PE_udata4;
34356 return DW_EH_PE_absptr;
34359 /* Expand copysign from SIGN to the positive value ABS_VALUE
34360 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
the sign bit.  */
34363 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
34365 enum machine_mode mode = GET_MODE (sign);
34366 rtx sgn = gen_reg_rtx (mode);
34367 if (mask == NULL_RTX)
34369 enum machine_mode vmode;
34371 if (mode == SFmode)
vmode = V4SFmode;
34373 else if (mode == DFmode)
vmode = V2DFmode;
else
vmode = mode;
34378 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), false);
34379 if (!VECTOR_MODE_P (mode))
34381 /* We need to generate a scalar mode mask in this case. */
34382 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
34383 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
34384 mask = gen_reg_rtx (mode);
34385 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
34389 mask = gen_rtx_NOT (mode, mask);
34390 emit_insn (gen_rtx_SET (VOIDmode, sgn,
34391 gen_rtx_AND (mode, mask, sign)));
34392 emit_insn (gen_rtx_SET (VOIDmode, result,
34393 gen_rtx_IOR (mode, abs_value, sgn)));
34396 /* Expand fabs (OP0) and return a new rtx that holds the result. The
34397 mask for masking out the sign-bit is stored in *SMASK, if that is
non-null.  */
34400 ix86_expand_sse_fabs (rtx op0, rtx *smask)
34402 enum machine_mode vmode, mode = GET_MODE (op0);
34405 xa = gen_reg_rtx (mode);
34406 if (mode == SFmode)
vmode = V4SFmode;
34408 else if (mode == DFmode)
vmode = V2DFmode;
else
vmode = mode;
34412 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), true);
34413 if (!VECTOR_MODE_P (mode))
34415 /* We need to generate a scalar mode mask in this case. */
34416 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
34417 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
34418 mask = gen_reg_rtx (mode);
34419 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
34421 emit_insn (gen_rtx_SET (VOIDmode, xa,
34422 gen_rtx_AND (mode, op0, mask)));
34430 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
34431 swapping the operands if SWAP_OPERANDS is true. The expanded
34432 code is a forward jump to a newly created label in case the
34433 comparison is true. The generated label rtx is returned. */
34435 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
34436 bool swap_operands)
34447 label = gen_label_rtx ();
34448 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
34449 emit_insn (gen_rtx_SET (VOIDmode, tmp,
34450 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
34451 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
34452 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
34453 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
34454 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
34455 JUMP_LABEL (tmp) = label;
34460 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
34461 using comparison code CODE. Operands are swapped for the comparison if
34462 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
34464 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
34465 bool swap_operands)
34467 rtx (*insn)(rtx, rtx, rtx, rtx);
34468 enum machine_mode mode = GET_MODE (op0);
34469 rtx mask = gen_reg_rtx (mode);
34478 insn = mode == DFmode ? gen_setcc_df_sse : gen_setcc_sf_sse;
34480 emit_insn (insn (mask, op0, op1,
34481 gen_rtx_fmt_ee (code, mode, op0, op1)));
34485 /* Generate and return a rtx of mode MODE for 2**n where n is the number
34486 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
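/* A scalar sketch of how this constant is used by the expanders below
   (rint_via_two52 is a hypothetical helper, not part of this file):
   for |x| < 2**52, adding and then subtracting 2**52 rounds x to an
   integer in the current rounding mode, because all fraction bits of
   x fall below one ulp of the sum:

     double rint_via_two52 (double x)	-- assumes |x| < 2**52
     {
       return (x + 0x1p52) - 0x1p52;
     }
*/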
34488 ix86_gen_TWO52 (enum machine_mode mode)
34490 REAL_VALUE_TYPE TWO52r;
34493 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
34494 TWO52 = const_double_from_real_value (TWO52r, mode);
34495 TWO52 = force_reg (mode, TWO52);
34500 /* Expand SSE sequence for computing lround from OP1 storing
into OP0.  */
34503 ix86_expand_lround (rtx op0, rtx op1)
34505 /* C code for the stuff we're doing below:
34506 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
return (long)tmp;
*/
34509 enum machine_mode mode = GET_MODE (op1);
34510 const struct real_format *fmt;
34511 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
34514 /* load nextafter (0.5, 0.0) */
34515 fmt = REAL_MODE_FORMAT (mode);
34516 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
34517 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
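/* pred_half is the largest representable value below 0.5
   (0.5 - 2**(-p-1) for a p-bit mantissa).  Plain 0.5 would be wrong
   here: for op1 = 0.49999999999999994 (the largest double below 0.5),
   op1 + 0.5 rounds up to 1.0 and lround would return 1, while
   op1 + pred_half stays below 1.0 and correctly yields 0.  */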
34519 /* adj = copysign (0.5, op1) */
34520 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
34521 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
34523 /* adj = op1 + adj */
34524 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
34526 /* op0 = (imode)adj */
34527 expand_fix (op0, adj, 0);
34530 /* Expand SSE2 sequence for computing lfloor or lceil from OP1, storing
into OP0.  */
34533 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
34535 /* C code for the stuff we're doing below (for do_floor):
xi = (long)op1;
34537 xi -= (double)xi > op1 ? 1 : 0;
return xi;
*/
34540 enum machine_mode fmode = GET_MODE (op1);
34541 enum machine_mode imode = GET_MODE (op0);
34542 rtx ireg, freg, label, tmp;
34544 /* reg = (long)op1 */
34545 ireg = gen_reg_rtx (imode);
34546 expand_fix (ireg, op1, 0);
34548 /* freg = (double)reg */
34549 freg = gen_reg_rtx (fmode);
34550 expand_float (freg, ireg, 0);
34552 /* ireg = (freg > op1) ? ireg - 1 : ireg */
34553 label = ix86_expand_sse_compare_and_jump (UNLE,
34554 freg, op1, !do_floor);
34555 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
34556 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
34557 emit_move_insn (ireg, tmp);
34559 emit_label (label);
34560 LABEL_NUSES (label) = 1;
34562 emit_move_insn (op0, ireg);
34565 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
34566 result in OPERAND0. */
34568 ix86_expand_rint (rtx operand0, rtx operand1)
34570 /* C code for the stuff we're doing below:
34571 xa = fabs (operand1);
34572 if (!isless (xa, 2**52))
return operand1;
34574 xa = xa + 2**52 - 2**52;
34575 return copysign (xa, operand1);
34577 enum machine_mode mode = GET_MODE (operand0);
34578 rtx res, xa, label, TWO52, mask;
34580 res = gen_reg_rtx (mode);
34581 emit_move_insn (res, operand1);
34583 /* xa = abs (operand1) */
34584 xa = ix86_expand_sse_fabs (res, &mask);
34586 /* if (!isless (xa, TWO52)) goto label; */
34587 TWO52 = ix86_gen_TWO52 (mode);
34588 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
34590 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
34591 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
34593 ix86_sse_copysign_to_positive (res, xa, res, mask);
34595 emit_label (label);
34596 LABEL_NUSES (label) = 1;
34598 emit_move_insn (operand0, res);
34601 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
into OPERAND0.  Sequence that works without relying on DImode truncation
via cvttsd2siq that is only available on 64bit targets.  */
34604 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
34606 /* C code for the stuff we expand below.
34607 double xa = fabs (x), x2;
34608 if (!isless (xa, TWO52))
return x;
34610 xa = xa + TWO52 - TWO52;
34611 x2 = copysign (xa, x);
Compensate.  Floor:
if (x2 > x)
x2 -= 1;
Compensate.  Ceil:
if (x2 < x)
x2 += 1;
return x2;
*/
34620 enum machine_mode mode = GET_MODE (operand0);
34621 rtx xa, TWO52, tmp, label, one, res, mask;
34623 TWO52 = ix86_gen_TWO52 (mode);
34625 /* Temporary for holding the result, initialized to the input
34626 operand to ease control flow. */
34627 res = gen_reg_rtx (mode);
34628 emit_move_insn (res, operand1);
34630 /* xa = abs (operand1) */
34631 xa = ix86_expand_sse_fabs (res, &mask);
34633 /* if (!isless (xa, TWO52)) goto label; */
34634 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
34636 /* xa = xa + TWO52 - TWO52; */
34637 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
34638 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
34640 /* xa = copysign (xa, operand1) */
34641 ix86_sse_copysign_to_positive (xa, xa, res, mask);
34643 /* generate 1.0 or -1.0 */
34644 one = force_reg (mode,
34645 const_double_from_real_value (do_floor
34646 ? dconst1 : dconstm1, mode));
34648 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
34649 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
34650 emit_insn (gen_rtx_SET (VOIDmode, tmp,
34651 gen_rtx_AND (mode, one, tmp)));
34652 /* We always need to subtract here to preserve signed zero. */
34653 tmp = expand_simple_binop (mode, MINUS,
34654 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
34655 emit_move_insn (res, tmp);
34657 emit_label (label);
34658 LABEL_NUSES (label) = 1;
34660 emit_move_insn (operand0, res);
34663 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
into OPERAND0.  */
34666 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
34668 /* C code for the stuff we expand below.
34669 double xa = fabs (x), x2;
34670 if (!isless (xa, TWO52))
return x;
34672 x2 = (double)(long)x;
Compensate.  Floor:
if (x2 > x)
x2 -= 1;
Compensate.  Ceil:
if (x2 < x)
x2 += 1;
34679 if (HONOR_SIGNED_ZEROS (mode))
34680 return copysign (x2, x);
return x2;
*/
34683 enum machine_mode mode = GET_MODE (operand0);
34684 rtx xa, xi, TWO52, tmp, label, one, res, mask;
34686 TWO52 = ix86_gen_TWO52 (mode);
34688 /* Temporary for holding the result, initialized to the input
34689 operand to ease control flow. */
34690 res = gen_reg_rtx (mode);
34691 emit_move_insn (res, operand1);
34693 /* xa = abs (operand1) */
34694 xa = ix86_expand_sse_fabs (res, &mask);
34696 /* if (!isless (xa, TWO52)) goto label; */
34697 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
34699 /* xa = (double)(long)x */
34700 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
34701 expand_fix (xi, res, 0);
34702 expand_float (xa, xi, 0);
34705 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
34707 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
34708 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
34709 emit_insn (gen_rtx_SET (VOIDmode, tmp,
34710 gen_rtx_AND (mode, one, tmp)));
34711 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
34712 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
34713 emit_move_insn (res, tmp);
34715 if (HONOR_SIGNED_ZEROS (mode))
34716 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
34718 emit_label (label);
34719 LABEL_NUSES (label) = 1;
34721 emit_move_insn (operand0, res);
34724 /* Expand SSE sequence for computing round from OPERAND1 storing
34725 into OPERAND0. Sequence that works without relying on DImode truncation
34726 via cvttsd2siq that is only available on 64bit targets. */
34728 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
34730 /* C code for the stuff we expand below.
34731 double xa = fabs (x), xa2, x2;
34732 if (!isless (xa, TWO52))
return x;
34734 Using the absolute value and copying back sign makes
34735 -0.0 -> -0.0 correct.
34736 xa2 = xa + TWO52 - TWO52;
Compensate.
dxa = xa2 - xa;
if (dxa <= -0.5)
xa2 += 1;
34741 else if (dxa > 0.5)
xa2 -= 1;
34743 x2 = copysign (xa2, x);
return x2;
*/
34746 enum machine_mode mode = GET_MODE (operand0);
34747 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
34749 TWO52 = ix86_gen_TWO52 (mode);
34751 /* Temporary for holding the result, initialized to the input
34752 operand to ease control flow. */
34753 res = gen_reg_rtx (mode);
34754 emit_move_insn (res, operand1);
34756 /* xa = abs (operand1) */
34757 xa = ix86_expand_sse_fabs (res, &mask);
34759 /* if (!isless (xa, TWO52)) goto label; */
34760 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
34762 /* xa2 = xa + TWO52 - TWO52; */
34763 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
34764 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
34766 /* dxa = xa2 - xa; */
34767 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
34769 /* generate 0.5, 1.0 and -0.5 */
34770 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
34771 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
34772 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
34776 tmp = gen_reg_rtx (mode);
34777 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
34778 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
34779 emit_insn (gen_rtx_SET (VOIDmode, tmp,
34780 gen_rtx_AND (mode, one, tmp)));
34781 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
34782 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
34783 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
34784 emit_insn (gen_rtx_SET (VOIDmode, tmp,
34785 gen_rtx_AND (mode, one, tmp)));
34786 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
34788 /* res = copysign (xa2, operand1) */
34789 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
34791 emit_label (label);
34792 LABEL_NUSES (label) = 1;
34794 emit_move_insn (operand0, res);
34797 /* Expand SSE sequence for computing trunc from OPERAND1 storing
into OPERAND0.  */
34800 ix86_expand_trunc (rtx operand0, rtx operand1)
34802 /* C code for SSE variant we expand below.
34803 double xa = fabs (x), x2;
34804 if (!isless (xa, TWO52))
return x;
34806 x2 = (double)(long)x;
34807 if (HONOR_SIGNED_ZEROS (mode))
34808 return copysign (x2, x);
return x2;
*/
34811 enum machine_mode mode = GET_MODE (operand0);
34812 rtx xa, xi, TWO52, label, res, mask;
34814 TWO52 = ix86_gen_TWO52 (mode);
34816 /* Temporary for holding the result, initialized to the input
34817 operand to ease control flow. */
34818 res = gen_reg_rtx (mode);
34819 emit_move_insn (res, operand1);
34821 /* xa = abs (operand1) */
34822 xa = ix86_expand_sse_fabs (res, &mask);
34824 /* if (!isless (xa, TWO52)) goto label; */
34825 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
34827 /* x = (double)(long)x */
34828 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
34829 expand_fix (xi, res, 0);
34830 expand_float (res, xi, 0);
34832 if (HONOR_SIGNED_ZEROS (mode))
34833 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
34835 emit_label (label);
34836 LABEL_NUSES (label) = 1;
34838 emit_move_insn (operand0, res);
34841 /* Expand SSE sequence for computing trunc from OPERAND1 storing
into OPERAND0.  Sequence that works without relying on DImode truncation
via cvttsd2siq that is only available on 64bit targets.  */
34844 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
34846 enum machine_mode mode = GET_MODE (operand0);
34847 rtx xa, mask, TWO52, label, one, res, smask, tmp;
34849 /* C code for SSE variant we expand below.
34850 double xa = fabs (x), x2;
34851 if (!isless (xa, TWO52))
return x;
34853 xa2 = xa + TWO52 - TWO52;
Compensate:
if (xa2 > xa)
xa2 -= 1.0;
34857 x2 = copysign (xa2, x);
return x2;
*/
34861 TWO52 = ix86_gen_TWO52 (mode);
34863 /* Temporary for holding the result, initialized to the input
34864 operand to ease control flow. */
34865 res = gen_reg_rtx (mode);
34866 emit_move_insn (res, operand1);
34868 /* xa = abs (operand1) */
34869 xa = ix86_expand_sse_fabs (res, &smask);
34871 /* if (!isless (xa, TWO52)) goto label; */
34872 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
34874 /* res = xa + TWO52 - TWO52; */
34875 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
34876 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
34877 emit_move_insn (res, tmp);
34880 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
34882 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
34883 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
34884 emit_insn (gen_rtx_SET (VOIDmode, mask,
34885 gen_rtx_AND (mode, mask, one)));
34886 tmp = expand_simple_binop (mode, MINUS,
34887 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
34888 emit_move_insn (res, tmp);
34890 /* res = copysign (res, operand1) */
34891 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
34893 emit_label (label);
34894 LABEL_NUSES (label) = 1;
34896 emit_move_insn (operand0, res);
34899 /* Expand SSE sequence for computing round from OPERAND1 storing
34902 ix86_expand_round (rtx operand0, rtx operand1)
34904 /* C code for the stuff we're doing below:
34905 double xa = fabs (x);
34906 if (!isless (xa, TWO52))
return x;
34908 xa = (double)(long)(xa + nextafter (0.5, 0.0));
34909 return copysign (xa, x);
34911 enum machine_mode mode = GET_MODE (operand0);
34912 rtx res, TWO52, xa, label, xi, half, mask;
34913 const struct real_format *fmt;
34914 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
34916 /* Temporary for holding the result, initialized to the input
34917 operand to ease control flow. */
34918 res = gen_reg_rtx (mode);
34919 emit_move_insn (res, operand1);
34921 TWO52 = ix86_gen_TWO52 (mode);
34922 xa = ix86_expand_sse_fabs (res, &mask);
34923 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
34925 /* load nextafter (0.5, 0.0) */
34926 fmt = REAL_MODE_FORMAT (mode);
34927 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
34928 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
34930 /* xa = xa + 0.5 */
34931 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
34932 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
34934 /* xa = (double)(int64_t)xa */
34935 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
34936 expand_fix (xi, xa, 0);
34937 expand_float (xa, xi, 0);
34939 /* res = copysign (xa, operand1) */
34940 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
34942 emit_label (label);
34943 LABEL_NUSES (label) = 1;
34945 emit_move_insn (operand0, res);
34948 /* Expand SSE sequence for computing round
34949 from OP1 storing into OP0 using sse4 round insn. */
34951 ix86_expand_round_sse4 (rtx op0, rtx op1)
34953 enum machine_mode mode = GET_MODE (op0);
34954 rtx e1, e2, res, half;
34955 const struct real_format *fmt;
34956 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
34957 rtx (*gen_copysign) (rtx, rtx, rtx);
34958 rtx (*gen_round) (rtx, rtx, rtx);
34963 gen_copysign = gen_copysignsf3;
34964 gen_round = gen_sse4_1_roundsf2;
34967 gen_copysign = gen_copysigndf3;
34968 gen_round = gen_sse4_1_rounddf2;
34971 gcc_unreachable ();
34974 /* round (a) = trunc (a + copysign (0.5, a)) */
34976 /* load nextafter (0.5, 0.0) */
34977 fmt = REAL_MODE_FORMAT (mode);
34978 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
34979 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
34980 half = const_double_from_real_value (pred_half, mode);
34982 /* e1 = copysign (0.5, op1) */
34983 e1 = gen_reg_rtx (mode);
34984 emit_insn (gen_copysign (e1, half, op1));
34986 /* e2 = op1 + e1 */
34987 e2 = expand_simple_binop (mode, PLUS, op1, e1, NULL_RTX, 0, OPTAB_DIRECT);
34989 /* res = trunc (e2) */
34990 res = gen_reg_rtx (mode);
34991 emit_insn (gen_round (res, e2, GEN_INT (ROUND_TRUNC)));
34993 emit_move_insn (op0, res);
34997 /* Table of valid machine attributes. */
34998 static const struct attribute_spec ix86_attribute_table[] =
35000 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
35001 affects_type_identity } */
35002 /* Stdcall attribute says callee is responsible for popping arguments
35003 if they are not variable. */
35004 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
35006 /* Fastcall attribute says callee is responsible for popping arguments
35007 if they are not variable. */
35008 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
35010 /* Thiscall attribute says callee is responsible for popping arguments
35011 if they are not variable. */
35012 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
35014 /* Cdecl attribute says the callee is a normal C declaration */
35015 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute,
35017 /* Regparm attribute specifies how many integer arguments are to be
35018 passed in registers. */
35019 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute,
35021 /* Sseregparm attribute says we are using x86_64 calling conventions
35022 for FP arguments. */
35023 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute,
35025 /* The transactional memory builtins are implicitly regparm or fastcall
35026 depending on the ABI. Override the generic do-nothing attribute that
35027 these builtins were declared with. */
35028 { "*tm regparm", 0, 0, false, true, true, ix86_handle_tm_regparm_attribute,
35030 /* force_align_arg_pointer says this function realigns the stack at entry. */
35031 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
35032 false, true, true, ix86_handle_cconv_attribute, false },
35033 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
35034 { "dllimport", 0, 0, false, false, false, handle_dll_attribute, false },
35035 { "dllexport", 0, 0, false, false, false, handle_dll_attribute, false },
35036 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute,
35039 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
35041 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
35043 #ifdef SUBTARGET_ATTRIBUTE_TABLE
35044 SUBTARGET_ATTRIBUTE_TABLE,
35046 /* ms_abi and sysv_abi calling convention function attributes. */
35047 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
35048 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
35049 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute,
35051 { "callee_pop_aggregate_return", 1, 1, false, true, true,
35052 ix86_handle_callee_pop_aggregate_return, true },
35054 { NULL, 0, 0, false, false, false, NULL, false }
35057 /* Implement targetm.vectorize.builtin_vectorization_cost. */
35059 ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
35060 tree vectype ATTRIBUTE_UNUSED,
35061 int misalign ATTRIBUTE_UNUSED)
35063 switch (type_of_cost)
35066 return ix86_cost->scalar_stmt_cost;
35069 return ix86_cost->scalar_load_cost;
35072 return ix86_cost->scalar_store_cost;
35075 return ix86_cost->vec_stmt_cost;
35078 return ix86_cost->vec_align_load_cost;
35081 return ix86_cost->vec_store_cost;
35083 case vec_to_scalar:
35084 return ix86_cost->vec_to_scalar_cost;
35086 case scalar_to_vec:
35087 return ix86_cost->scalar_to_vec_cost;
35089 case unaligned_load:
35090 case unaligned_store:
35091 return ix86_cost->vec_unalign_load_cost;
35093 case cond_branch_taken:
35094 return ix86_cost->cond_taken_branch_cost;
35096 case cond_branch_not_taken:
35097 return ix86_cost->cond_not_taken_branch_cost;
35103 gcc_unreachable ();
35108 /* Return a vector mode with twice as many elements as VMODE. */
35109 /* ??? Consider moving this to a table generated by genmodes.c. */
35111 static enum machine_mode
35112 doublesize_vector_mode (enum machine_mode vmode)
35116 case V2SFmode: return V4SFmode;
35117 case V1DImode: return V2DImode;
35118 case V2SImode: return V4SImode;
35119 case V4HImode: return V8HImode;
35120 case V8QImode: return V16QImode;
35122 case V2DFmode: return V4DFmode;
35123 case V4SFmode: return V8SFmode;
35124 case V2DImode: return V4DImode;
35125 case V4SImode: return V8SImode;
35126 case V8HImode: return V16HImode;
35127 case V16QImode: return V32QImode;
35129 case V4DFmode: return V8DFmode;
35130 case V8SFmode: return V16SFmode;
35131 case V4DImode: return V8DImode;
35132 case V8SImode: return V16SImode;
35133 case V16HImode: return V32HImode;
35134 case V32QImode: return V64QImode;
35137 gcc_unreachable ();
35141 /* Construct (set target (vec_select op0 (parallel perm))) and
35142 return true if that's a valid instruction in the active ISA. */
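/* For example, with V4SFmode and perm = {1, 0, 3, 2} this builds
   (set target (vec_select:V4SF op0 (parallel [1 0 3 2]))); whether it
   succeeds depends solely on recog_memoized finding an enabled
   shuffle pattern that matches.  */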
35145 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
35147 rtx rperm[MAX_VECT_LEN], x;
35150 for (i = 0; i < nelt; ++i)
35151 rperm[i] = GEN_INT (perm[i]);
35153 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
35154 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
35155 x = gen_rtx_SET (VOIDmode, target, x);
35158 if (recog_memoized (x) < 0)
35166 /* Similar, but generate a vec_concat from op0 and op1 as well. */
35169 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
35170 const unsigned char *perm, unsigned nelt)
35172 enum machine_mode v2mode;
35175 v2mode = doublesize_vector_mode (GET_MODE (op0));
35176 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
35177 return expand_vselect (target, x, perm, nelt);
35180 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
35181 in terms of blendp[sd] / pblendw / pblendvb / vpblendd. */
35184 expand_vec_perm_blend (struct expand_vec_perm_d *d)
35186 enum machine_mode vmode = d->vmode;
35187 unsigned i, mask, nelt = d->nelt;
35188 rtx target, op0, op1, x;
35189 rtx rperm[32], vperm;
35191 if (d->op0 == d->op1)
35193 if (TARGET_AVX2 && GET_MODE_SIZE (vmode) == 32)
35195 else if (TARGET_AVX && (vmode == V4DFmode || vmode == V8SFmode))
35197 else if (TARGET_SSE4_1 && GET_MODE_SIZE (vmode) == 16)
35202 /* This is a blend, not a permute. Elements must stay in their
35203 respective lanes. */
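/* E.g. for V4SFmode, perm = {0, 5, 2, 7} is a valid blend: positions
   0 and 2 take op0's elements, positions 1 and 3 take op1's (5 and 7
   are 1 + nelt and 3 + nelt), giving mask 0b1010 below.
   perm = {1, 5, 2, 7} is rejected, since position 0 would have to
   receive element 1.  */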
35204 for (i = 0; i < nelt; ++i)
35206 unsigned e = d->perm[i];
35207 if (!(e == i || e == i + nelt))
35214 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
35215 decision should be extracted elsewhere, so that we only try that
35216 sequence once all budget==3 options have been tried. */
35217 target = d->target;
35230 for (i = 0; i < nelt; ++i)
35231 mask |= (d->perm[i] >= nelt) << i;
35235 for (i = 0; i < 2; ++i)
35236 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
35241 for (i = 0; i < 4; ++i)
35242 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
35247 /* See if bytes move in pairs so we can use pblendw with
35248 an immediate argument, rather than pblendvb with a vector
register argument.  */
35250 for (i = 0; i < 16; i += 2)
35251 if (d->perm[i] + 1 != d->perm[i + 1])
35254 for (i = 0; i < nelt; ++i)
35255 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
35258 vperm = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rperm));
35259 vperm = force_reg (vmode, vperm);
35261 if (GET_MODE_SIZE (vmode) == 16)
35262 emit_insn (gen_sse4_1_pblendvb (target, op0, op1, vperm));
35264 emit_insn (gen_avx2_pblendvb (target, op0, op1, vperm));
35268 for (i = 0; i < 8; ++i)
35269 mask |= (d->perm[i * 2] >= 16) << i;
35274 target = gen_lowpart (vmode, target);
35275 op0 = gen_lowpart (vmode, op0);
35276 op1 = gen_lowpart (vmode, op1);
35280 /* See if bytes move in pairs. If not, vpblendvb must be used. */
35281 for (i = 0; i < 32; i += 2)
35282 if (d->perm[i] + 1 != d->perm[i + 1])
35284 /* See if bytes move in quadruplets. If yes, vpblendd
35285 with immediate can be used. */
35286 for (i = 0; i < 32; i += 4)
35287 if (d->perm[i] + 2 != d->perm[i + 2])
35291 /* See if bytes move the same in both lanes. If yes,
35292 vpblendw with immediate can be used. */
35293 for (i = 0; i < 16; i += 2)
35294 if (d->perm[i] + 16 != d->perm[i + 16])
35297 /* Use vpblendw. */
35298 for (i = 0; i < 16; ++i)
35299 mask |= (d->perm[i * 2] >= 32) << i;
35304 /* Use vpblendd. */
35305 for (i = 0; i < 8; ++i)
35306 mask |= (d->perm[i * 4] >= 32) << i;
35311 /* See if words move in pairs. If yes, vpblendd can be used. */
35312 for (i = 0; i < 16; i += 2)
35313 if (d->perm[i] + 1 != d->perm[i + 1])
35317 /* See if words move the same in both lanes. If not,
35318 vpblendvb must be used. */
35319 for (i = 0; i < 8; i++)
35320 if (d->perm[i] + 8 != d->perm[i + 8])
35322 /* Use vpblendvb. */
35323 for (i = 0; i < 32; ++i)
35324 rperm[i] = (d->perm[i / 2] < 16 ? const0_rtx : constm1_rtx);
35328 target = gen_lowpart (vmode, target);
35329 op0 = gen_lowpart (vmode, op0);
35330 op1 = gen_lowpart (vmode, op1);
35331 goto finish_pblendvb;
35334 /* Use vpblendw. */
35335 for (i = 0; i < 16; ++i)
35336 mask |= (d->perm[i] >= 16) << i;
35340 /* Use vpblendd. */
35341 for (i = 0; i < 8; ++i)
35342 mask |= (d->perm[i * 2] >= 16) << i;
35347 /* Use vpblendd. */
35348 for (i = 0; i < 4; ++i)
35349 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
35354 gcc_unreachable ();
35357 /* This matches five different patterns with the different modes. */
35358 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
35359 x = gen_rtx_SET (VOIDmode, target, x);
35365 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
35366 in terms of the variable form of vpermilps.
35368 Note that we will have already failed the immediate input vpermilps,
35369 which requires that the high and low part shuffle be identical; the
35370 variable form doesn't require that. */
35373 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
35375 rtx rperm[8], vperm;
35378 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
35381 /* We can only permute within the 128-bit lane. */
35382 for (i = 0; i < 8; ++i)
35384 unsigned e = d->perm[i];
35385 if (i < 4 ? e >= 4 : e < 4)
35392 for (i = 0; i < 8; ++i)
35394 unsigned e = d->perm[i];
35396 /* Within each 128-bit lane, the elements of op0 are numbered
35397 from 0 and the elements of op1 are numbered from 4. */
35403 rperm[i] = GEN_INT (e);
35406 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
35407 vperm = force_reg (V8SImode, vperm);
35408 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
35413 /* Return true if permutation D can be performed as VMODE permutation
instead.  */
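/* E.g. a V16QImode permutation that moves bytes in aligned groups of
   four, such as {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11}, can be
   performed as the V4SImode permutation {1, 0, 3, 2}.  */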
35417 valid_perm_using_mode_p (enum machine_mode vmode, struct expand_vec_perm_d *d)
35419 unsigned int i, j, chunk;
35421 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT
35422 || GET_MODE_CLASS (d->vmode) != MODE_VECTOR_INT
35423 || GET_MODE_SIZE (vmode) != GET_MODE_SIZE (d->vmode))
35426 if (GET_MODE_NUNITS (vmode) >= d->nelt)
35429 chunk = d->nelt / GET_MODE_NUNITS (vmode);
35430 for (i = 0; i < d->nelt; i += chunk)
35431 if (d->perm[i] & (chunk - 1))
35434 for (j = 1; j < chunk; ++j)
35435 if (d->perm[i] + j != d->perm[i + j])
35441 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
35442 in terms of pshufb, vpperm, vpermq, vpermd or vperm2i128. */
35445 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
35447 unsigned i, nelt, eltsz, mask;
35448 unsigned char perm[32];
35449 enum machine_mode vmode = V16QImode;
35450 rtx rperm[32], vperm, target, op0, op1;
35454 if (d->op0 != d->op1)
35456 if (!TARGET_XOP || GET_MODE_SIZE (d->vmode) != 16)
35459 && valid_perm_using_mode_p (V2TImode, d))
35464 /* Use vperm2i128 insn. The pattern uses
35465 V4DImode instead of V2TImode. */
35466 target = gen_lowpart (V4DImode, d->target);
35467 op0 = gen_lowpart (V4DImode, d->op0);
35468 op1 = gen_lowpart (V4DImode, d->op1);
35470 = GEN_INT ((d->perm[0] / (nelt / 2))
35471 | ((d->perm[nelt / 2] / (nelt / 2)) * 16));
35472 emit_insn (gen_avx2_permv2ti (target, op0, op1, rperm[0]));
35480 if (GET_MODE_SIZE (d->vmode) == 16)
35485 else if (GET_MODE_SIZE (d->vmode) == 32)
35490 /* V4DImode should be already handled through
35491 expand_vselect by vpermq instruction. */
35492 gcc_assert (d->vmode != V4DImode);
35495 if (d->vmode == V8SImode
35496 || d->vmode == V16HImode
35497 || d->vmode == V32QImode)
35499 /* First see if vpermq can be used for
35500 V8SImode/V16HImode/V32QImode. */
35501 if (valid_perm_using_mode_p (V4DImode, d))
35503 for (i = 0; i < 4; i++)
35504 perm[i] = (d->perm[i * nelt / 4] * 4 / nelt) & 3;
35507 return expand_vselect (gen_lowpart (V4DImode, d->target),
35508 gen_lowpart (V4DImode, d->op0),
35512 /* Next see if vpermd can be used. */
35513 if (valid_perm_using_mode_p (V8SImode, d))
35517 if (vmode == V32QImode)
35519 /* vpshufb only works intra lane; it is not
35520 possible to shuffle bytes in between the lanes. */
35521 for (i = 0; i < nelt; ++i)
35522 if ((d->perm[i] ^ i) & (nelt / 2))
35533 if (vmode == V8SImode)
35534 for (i = 0; i < 8; ++i)
35535 rperm[i] = GEN_INT ((d->perm[i * nelt / 8] * 8 / nelt) & 7);
35538 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
35539 if (d->op0 != d->op1)
35540 mask = 2 * nelt - 1;
35541 else if (vmode == V16QImode)
35544 mask = nelt / 2 - 1;
35546 for (i = 0; i < nelt; ++i)
35548 unsigned j, e = d->perm[i] & mask;
35549 for (j = 0; j < eltsz; ++j)
35550 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
35554 vperm = gen_rtx_CONST_VECTOR (vmode,
35555 gen_rtvec_v (GET_MODE_NUNITS (vmode), rperm));
35556 vperm = force_reg (vmode, vperm);
35558 target = gen_lowpart (vmode, d->target);
35559 op0 = gen_lowpart (vmode, d->op0);
35560 if (d->op0 == d->op1)
35562 if (vmode == V16QImode)
35563 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
35564 else if (vmode == V32QImode)
35565 emit_insn (gen_avx2_pshufbv32qi3 (target, op0, vperm));
35567 emit_insn (gen_avx2_permvarv8si (target, vperm, op0));
35571 op1 = gen_lowpart (vmode, d->op1);
35572 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
35578 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
35579 in a single instruction. */
35582 expand_vec_perm_1 (struct expand_vec_perm_d *d)
35584 unsigned i, nelt = d->nelt;
35585 unsigned char perm2[MAX_VECT_LEN];
35587 /* Check plain VEC_SELECT first, because AVX has instructions that could
35588 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
35589 input where SEL+CONCAT may not. */
35590 if (d->op0 == d->op1)
35592 int mask = nelt - 1;
35593 bool identity_perm = true;
35594 bool broadcast_perm = true;
35596 for (i = 0; i < nelt; i++)
35598 perm2[i] = d->perm[i] & mask;
35600 identity_perm = false;
35602 broadcast_perm = false;
35608 emit_move_insn (d->target, d->op0);
35611 else if (broadcast_perm && TARGET_AVX2)
35613 /* Use vpbroadcast{b,w,d}. */
35614 rtx op = d->op0, (*gen) (rtx, rtx) = NULL;
35618 op = gen_lowpart (V16QImode, op);
35619 gen = gen_avx2_pbroadcastv32qi;
35622 op = gen_lowpart (V8HImode, op);
35623 gen = gen_avx2_pbroadcastv16hi;
35626 op = gen_lowpart (V4SImode, op);
35627 gen = gen_avx2_pbroadcastv8si;
35630 gen = gen_avx2_pbroadcastv16qi;
35633 gen = gen_avx2_pbroadcastv8hi;
35635 /* For other modes prefer other shuffles this function creates. */
35641 emit_insn (gen (d->target, op));
35646 if (expand_vselect (d->target, d->op0, perm2, nelt))
35649 /* There are plenty of patterns in sse.md that are written for
35650 SEL+CONCAT and are not replicated for a single op. Perhaps
35651 that should be changed, to avoid the nastiness here. */
35653 /* Recognize interleave style patterns, which means incrementing
35654 every other permutation operand. */
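/* E.g. the single-operand V4SFmode permutation {0, 0, 1, 1} becomes
   perm2 = {0, 4, 1, 5}, which on (vec_concat op0 op0) is exactly the
   unpcklps pattern.  */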
35655 for (i = 0; i < nelt; i += 2)
35657 perm2[i] = d->perm[i] & mask;
35658 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
35660 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
35663 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
35666 for (i = 0; i < nelt; i += 4)
35668 perm2[i + 0] = d->perm[i + 0] & mask;
35669 perm2[i + 1] = d->perm[i + 1] & mask;
35670 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
35671 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
35674 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
35679 /* Finally, try the fully general two operand permute. */
35680 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
35683 /* Recognize interleave style patterns with reversed operands. */
35684 if (d->op0 != d->op1)
35686 for (i = 0; i < nelt; ++i)
35688 unsigned e = d->perm[i];
35696 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
35700 /* Try the SSE4.1 blend variable merge instructions. */
35701 if (expand_vec_perm_blend (d))
35704 /* Try one of the AVX vpermil variable permutations. */
35705 if (expand_vec_perm_vpermil (d))
35708 /* Try the SSSE3 pshufb or XOP vpperm or AVX2 vperm2i128,
35709 vpshufb, vpermd or vpermq variable permutation. */
35710 if (expand_vec_perm_pshufb (d))
35716 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
35717 in terms of a pair of pshuflw + pshufhw instructions. */
35720 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
35722 unsigned char perm2[MAX_VECT_LEN];
35726 if (d->vmode != V8HImode || d->op0 != d->op1)
35729 /* The two permutations only operate in 64-bit lanes. */
35730 for (i = 0; i < 4; ++i)
35731 if (d->perm[i] >= 4)
35733 for (i = 4; i < 8; ++i)
35734 if (d->perm[i] < 4)
35740 /* Emit the pshuflw. */
35741 memcpy (perm2, d->perm, 4);
35742 for (i = 4; i < 8; ++i)
35744 ok = expand_vselect (d->target, d->op0, perm2, 8);
35747 /* Emit the pshufhw. */
35748 memcpy (perm2 + 4, d->perm + 4, 4);
35749 for (i = 0; i < 4; ++i)
35751 ok = expand_vselect (d->target, d->target, perm2, 8);
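/* A user-level sketch of the same pair of shuffles, assuming SSE2 and
   <emmintrin.h> (helper name hypothetical).  For the permutation
   {3,1,2,0, 7,5,6,4} both halves use the selector _MM_SHUFFLE (0, 2, 1, 3):

     static __m128i
     shuffle_within_halves (__m128i x)
     {
       x = _mm_shufflelo_epi16 (x, _MM_SHUFFLE (0, 2, 1, 3));     pshuflw
       return _mm_shufflehi_epi16 (x, _MM_SHUFFLE (0, 2, 1, 3));  pshufhw
     }
*/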
35757 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
35758 the permutation using the SSSE3 palignr instruction. This succeeds
35759 when all of the elements in PERM fit within one vector and we merely
35760 need to shift them down so that a single vector permutation has a
35761 chance to succeed. */
35764 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
35766 unsigned i, nelt = d->nelt;
35771 /* Even with AVX, palignr only operates on 128-bit vectors. */
35772 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
35775 min = nelt, max = 0;
35776 for (i = 0; i < nelt; ++i)
35778 unsigned e = d->perm[i];
35784 if (min == 0 || max - min >= nelt)
35787 /* Given that we have SSSE3, we know we'll be able to implement the
35788 single operand permutation after the palignr with pshufb. */
35792 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
35793 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
35794 gen_lowpart (TImode, d->op1),
35795 gen_lowpart (TImode, d->op0), shift));
35797 d->op0 = d->op1 = d->target;
35800 for (i = 0; i < nelt; ++i)
35802 unsigned e = d->perm[i] - min;
35808 /* Test for the degenerate case where the alignment by itself
35809 produces the desired permutation. */
35813 ok = expand_vec_perm_1 (d);
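/* For example (a sketch only; SSSE3 and <tmmintrin.h> assumed): with
   V4SImode and min == 3, the palignr above shifts the op1:op0
   concatenation down by 3 * 4 == 12 bytes:

     static __m128i
     shift_down_3 (__m128i op1, __m128i op0)
     {
       return _mm_alignr_epi8 (op1, op0, 12);    palignr $12
     }

   leaving elements {3,4,5,6} in a single register, ready for the
   one-input shuffle that finishes the permutation.  */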
35819 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
35820 a two vector permutation into a single vector permutation by using
35821 an interleave operation to merge the vectors. */
35824 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
35826 struct expand_vec_perm_d dremap, dfinal;
35827 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
35828 unsigned HOST_WIDE_INT contents;
35829 unsigned char remap[2 * MAX_VECT_LEN];
35831 bool ok, same_halves = false;
35833 if (GET_MODE_SIZE (d->vmode) == 16)
35835 if (d->op0 == d->op1)
35838 else if (GET_MODE_SIZE (d->vmode) == 32)
35842 /* For 32-byte modes allow even d->op0 == d->op1.
35843 The lack of cross-lane shuffling in some instructions
35844 might prevent a single insn shuffle. */
35849 /* Examine from whence the elements come. */
35851 for (i = 0; i < nelt; ++i)
35852 contents |= ((unsigned HOST_WIDE_INT) 1) << d->perm[i];
35854 memset (remap, 0xff, sizeof (remap));
35857 if (GET_MODE_SIZE (d->vmode) == 16)
35859 unsigned HOST_WIDE_INT h1, h2, h3, h4;
35861 /* Split the two input vectors into 4 halves. */
35862 h1 = (((unsigned HOST_WIDE_INT) 1) << nelt2) - 1;
35867 /* If the elements are all from the low halves, use interleave low;
35868 similarly for interleave high. If the elements are from mis-matched
35869 halves, we can use shufps for V4SF/V4SI or do a DImode shuffle. */
35870 if ((contents & (h1 | h3)) == contents)
35873 for (i = 0; i < nelt2; ++i)
35876 remap[i + nelt] = i * 2 + 1;
35877 dremap.perm[i * 2] = i;
35878 dremap.perm[i * 2 + 1] = i + nelt;
35880 if (!TARGET_SSE2 && d->vmode == V4SImode)
35881 dremap.vmode = V4SFmode;
35883 else if ((contents & (h2 | h4)) == contents)
35886 for (i = 0; i < nelt2; ++i)
35888 remap[i + nelt2] = i * 2;
35889 remap[i + nelt + nelt2] = i * 2 + 1;
35890 dremap.perm[i * 2] = i + nelt2;
35891 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
35893 if (!TARGET_SSE2 && d->vmode == V4SImode)
35894 dremap.vmode = V4SFmode;
35896 else if ((contents & (h1 | h4)) == contents)
35899 for (i = 0; i < nelt2; ++i)
35902 remap[i + nelt + nelt2] = i + nelt2;
35903 dremap.perm[i] = i;
35904 dremap.perm[i + nelt2] = i + nelt + nelt2;
35909 dremap.vmode = V2DImode;
35911 dremap.perm[0] = 0;
35912 dremap.perm[1] = 3;
35915 else if ((contents & (h2 | h3)) == contents)
35918 for (i = 0; i < nelt2; ++i)
35920 remap[i + nelt2] = i;
35921 remap[i + nelt] = i + nelt2;
35922 dremap.perm[i] = i + nelt2;
35923 dremap.perm[i + nelt2] = i + nelt;
35928 dremap.vmode = V2DImode;
35930 dremap.perm[0] = 1;
35931 dremap.perm[1] = 2;
35939 unsigned int nelt4 = nelt / 4, nzcnt = 0;
35940 unsigned HOST_WIDE_INT q[8];
35941 unsigned int nonzero_halves[4];
35943 /* Split the two input vectors into 8 quarters. */
35944 q[0] = (((unsigned HOST_WIDE_INT) 1) << nelt4) - 1;
35945 for (i = 1; i < 8; ++i)
35946 q[i] = q[0] << (nelt4 * i);
35947 for (i = 0; i < 4; ++i)
35948 if (((q[2 * i] | q[2 * i + 1]) & contents) != 0)
35950 nonzero_halves[nzcnt] = i;
35956 gcc_assert (d->op0 == d->op1);
35957 nonzero_halves[1] = nonzero_halves[0];
35958 same_halves = true;
35960 else if (d->op0 == d->op1)
35962 gcc_assert (nonzero_halves[0] == 0);
35963 gcc_assert (nonzero_halves[1] == 1);
35968 if (d->perm[0] / nelt2 == nonzero_halves[1])
35970 /* Attempt to increase the likelihood that dfinal
35971 shuffle will be intra-lane. */
35972 char tmph = nonzero_halves[0];
35973 nonzero_halves[0] = nonzero_halves[1];
35974 nonzero_halves[1] = tmph;
35977 /* vperm2f128 or vperm2i128. */
35978 for (i = 0; i < nelt2; ++i)
35980 remap[i + nonzero_halves[1] * nelt2] = i + nelt2;
35981 remap[i + nonzero_halves[0] * nelt2] = i;
35982 dremap.perm[i + nelt2] = i + nonzero_halves[1] * nelt2;
35983 dremap.perm[i] = i + nonzero_halves[0] * nelt2;
35986 if (d->vmode != V8SFmode
35987 && d->vmode != V4DFmode
35988 && d->vmode != V8SImode)
35990 dremap.vmode = V8SImode;
35992 for (i = 0; i < 4; ++i)
35994 dremap.perm[i] = i + nonzero_halves[0] * 4;
35995 dremap.perm[i + 4] = i + nonzero_halves[1] * 4;
35999 else if (d->op0 == d->op1)
36001 else if (TARGET_AVX2
36002 && (contents & (q[0] | q[2] | q[4] | q[6])) == contents)
36005 for (i = 0; i < nelt4; ++i)
36008 remap[i + nelt] = i * 2 + 1;
36009 remap[i + nelt2] = i * 2 + nelt2;
36010 remap[i + nelt + nelt2] = i * 2 + nelt2 + 1;
36011 dremap.perm[i * 2] = i;
36012 dremap.perm[i * 2 + 1] = i + nelt;
36013 dremap.perm[i * 2 + nelt2] = i + nelt2;
36014 dremap.perm[i * 2 + nelt2 + 1] = i + nelt + nelt2;
36017 else if (TARGET_AVX2
36018 && (contents & (q[1] | q[3] | q[5] | q[7])) == contents)
36021 for (i = 0; i < nelt4; ++i)
36023 remap[i + nelt4] = i * 2;
36024 remap[i + nelt + nelt4] = i * 2 + 1;
36025 remap[i + nelt2 + nelt4] = i * 2 + nelt2;
36026 remap[i + nelt + nelt2 + nelt4] = i * 2 + nelt2 + 1;
36027 dremap.perm[i * 2] = i + nelt4;
36028 dremap.perm[i * 2 + 1] = i + nelt + nelt4;
36029 dremap.perm[i * 2 + nelt2] = i + nelt2 + nelt4;
36030 dremap.perm[i * 2 + nelt2 + 1] = i + nelt + nelt2 + nelt4;
36037 /* Use the remapping array set up above to move the elements from their
36038 swizzled locations into their final destinations. */
36040 for (i = 0; i < nelt; ++i)
36042 unsigned e = remap[d->perm[i]];
36043 gcc_assert (e < nelt);
36044 /* If same_halves is true, both halves of the remapped vector are the
36045 same. Avoid cross-lane accesses if possible. */
36046 if (same_halves && i >= nelt2)
36048 gcc_assert (e < nelt2);
36049 dfinal.perm[i] = e + nelt2;
36052 dfinal.perm[i] = e;
36054 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
36055 dfinal.op1 = dfinal.op0;
36056 dremap.target = dfinal.op0;
36058 /* Test if the final remap can be done with a single insn. For V4SFmode or
36059 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
36061 ok = expand_vec_perm_1 (&dfinal);
36062 seq = get_insns ();
36071 if (dremap.vmode != dfinal.vmode)
36073 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
36074 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
36075 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
36078 ok = expand_vec_perm_1 (&dremap);
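/* For instance (a sketch, not this exact code path; SSE2 and
   <emmintrin.h> assumed): when every selected element lives in the low
   half of its input, the dremap step above is a plain interleave-low,
   e.g. for V8HImode:

     static __m128i
     merge_low_halves (__m128i op0, __m128i op1)
     {
       return _mm_unpacklo_epi16 (op0, op1);    punpcklwd: a0 b0 a1 b1 ...
     }

   after which dfinal only needs a one-input shuffle of the result.  */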
36085 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
36086 a single vector cross-lane permutation into vpermq followed
36087 by any of the single insn permutations. */
36090 expand_vec_perm_vpermq_perm_1 (struct expand_vec_perm_d *d)
36092 struct expand_vec_perm_d dremap, dfinal;
36093 unsigned i, j, nelt = d->nelt, nelt2 = nelt / 2, nelt4 = nelt / 4;
36094 unsigned contents[2];
36098 && (d->vmode == V32QImode || d->vmode == V16HImode)
36099 && d->op0 == d->op1))
36104 for (i = 0; i < nelt2; ++i)
36106 contents[0] |= 1u << (d->perm[i] / nelt4);
36107 contents[1] |= 1u << (d->perm[i + nelt2] / nelt4);
36110 for (i = 0; i < 2; ++i)
36112 unsigned int cnt = 0;
36113 for (j = 0; j < 4; ++j)
36114 if ((contents[i] & (1u << j)) != 0 && ++cnt > 2)
36122 dremap.vmode = V4DImode;
36124 dremap.target = gen_reg_rtx (V4DImode);
36125 dremap.op0 = gen_lowpart (V4DImode, d->op0);
36126 dremap.op1 = dremap.op0;
36127 for (i = 0; i < 2; ++i)
36129 unsigned int cnt = 0;
36130 for (j = 0; j < 4; ++j)
36131 if ((contents[i] & (1u << j)) != 0)
36132 dremap.perm[2 * i + cnt++] = j;
36133 for (; cnt < 2; ++cnt)
36134 dremap.perm[2 * i + cnt] = 0;
36138 dfinal.op0 = gen_lowpart (dfinal.vmode, dremap.target);
36139 dfinal.op1 = dfinal.op0;
36140 for (i = 0, j = 0; i < nelt; ++i)
36144 dfinal.perm[i] = (d->perm[i] & (nelt4 - 1)) | (j ? nelt2 : 0);
36145 if ((d->perm[i] / nelt4) == dremap.perm[j])
36147 else if ((d->perm[i] / nelt4) == dremap.perm[j + 1])
36148 dfinal.perm[i] |= nelt4;
36150 gcc_unreachable ();
36153 ok = expand_vec_perm_1 (&dremap);
36156 ok = expand_vec_perm_1 (&dfinal);
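/* A sketch of the two-step expansion above, assuming AVX2 and
   <immintrin.h> (helper name hypothetical): the dremap step is a
   cross-lane vpermq gathering the needed 64-bit quarters, e.g. quarters
   {0,2} into the low lane and {1,3} into the high lane:

     static __m256i
     gather_quarters (__m256i x)
     {
       return _mm256_permute4x64_epi64 (x, 0xd8);    vpermq {0,2,1,3}
     }

   after which dfinal can use any single-insn permutation, e.g. an
   intra-lane vpshufb.  */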
36162 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
36163 a two vector permutation using 2 intra-lane interleave insns
36164 and cross-lane shuffle for 32-byte vectors. */
36167 expand_vec_perm_interleave3 (struct expand_vec_perm_d *d)
36170 rtx (*gen) (rtx, rtx, rtx);
36172 if (d->op0 == d->op1)
36174 if (TARGET_AVX2 && GET_MODE_SIZE (d->vmode) == 32)
36176 else if (TARGET_AVX && (d->vmode == V8SFmode || d->vmode == V4DFmode))
36182 if (d->perm[0] != 0 && d->perm[0] != nelt / 2)
36184 for (i = 0; i < nelt; i += 2)
36185 if (d->perm[i] != d->perm[0] + i / 2
36186 || d->perm[i + 1] != d->perm[0] + i / 2 + nelt)
36196 gen = gen_vec_interleave_highv32qi;
36198 gen = gen_vec_interleave_lowv32qi;
36202 gen = gen_vec_interleave_highv16hi;
36204 gen = gen_vec_interleave_lowv16hi;
36208 gen = gen_vec_interleave_highv8si;
36210 gen = gen_vec_interleave_lowv8si;
36214 gen = gen_vec_interleave_highv4di;
36216 gen = gen_vec_interleave_lowv4di;
36220 gen = gen_vec_interleave_highv8sf;
36222 gen = gen_vec_interleave_lowv8sf;
36226 gen = gen_vec_interleave_highv4df;
36228 gen = gen_vec_interleave_lowv4df;
36231 gcc_unreachable ();
36234 emit_insn (gen (d->target, d->op0, d->op1));
36238 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
36239 permutation with two pshufb insns and an ior. We should have already
36240 failed all two instruction sequences. */
36243 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
36245 rtx rperm[2][16], vperm, l, h, op, m128;
36246 unsigned int i, nelt, eltsz;
36248 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
36250 gcc_assert (d->op0 != d->op1);
36253 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
36255 /* Generate two permutation masks. If the required element is within
36256 the given vector it is shuffled into the proper lane. If the required
36257 element is in the other vector, force a zero into the lane by setting
36258 bit 7 in the permutation mask. */
36259 m128 = GEN_INT (-128);
36260 for (i = 0; i < nelt; ++i)
36262 unsigned j, e = d->perm[i];
36263 unsigned which = (e >= nelt);
36267 for (j = 0; j < eltsz; ++j)
36269 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
36270 rperm[1-which][i*eltsz + j] = m128;
36274 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
36275 vperm = force_reg (V16QImode, vperm);
36277 l = gen_reg_rtx (V16QImode);
36278 op = gen_lowpart (V16QImode, d->op0);
36279 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
36281 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
36282 vperm = force_reg (V16QImode, vperm);
36284 h = gen_reg_rtx (V16QImode);
36285 op = gen_lowpart (V16QImode, d->op1);
36286 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
36288 op = gen_lowpart (V16QImode, d->target);
36289 emit_insn (gen_iorv16qi3 (op, l, h));
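/* The -128 mask entries rely on pshufb zeroing any destination byte
   whose mask byte has bit 7 set.  A user-level sketch of the resulting
   three-insn sequence, assuming SSSE3 and <tmmintrin.h> (names are
   hypothetical):

     static __m128i
     merge_two (__m128i op0, __m128i op1, __m128i m0, __m128i m1)
     {
       __m128i l = _mm_shuffle_epi8 (op0, m0);    bytes from op0, rest 0
       __m128i h = _mm_shuffle_epi8 (op1, m1);    bytes from op1, rest 0
       return _mm_or_si128 (l, h);                por combines the two
     }
*/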
36294 /* Implement arbitrary permutation of one V32QImode and V16QImode operand
36295 with two vpshufb insns, vpermq and vpor. We should have already failed
36296 all two or three instruction sequences. */
36299 expand_vec_perm_vpshufb2_vpermq (struct expand_vec_perm_d *d)
36301 rtx rperm[2][32], vperm, l, h, hp, op, m128;
36302 unsigned int i, nelt, eltsz;
36305 || d->op0 != d->op1
36306 || (d->vmode != V32QImode && d->vmode != V16HImode))
36313 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
36315 /* Generate two permutation masks. If the required element is within
36316 the same lane, it is shuffled in. If the required element is from the
36317 other lane, force a zero by setting bit 7 in the permutation mask.
36318 The other mask has a non-negative element wherever an element is
36319 requested from the other lane; that element is also moved to the
36320 other lane, so that the result of vpshufb can have the two V2TImode halves
36322 m128 = GEN_INT (-128);
36323 for (i = 0; i < nelt; ++i)
36325 unsigned j, e = d->perm[i] & (nelt / 2 - 1);
36326 unsigned which = ((d->perm[i] ^ i) & (nelt / 2)) * eltsz;
36328 for (j = 0; j < eltsz; ++j)
36330 rperm[!!which][(i * eltsz + j) ^ which] = GEN_INT (e * eltsz + j);
36331 rperm[!which][(i * eltsz + j) ^ (which ^ 16)] = m128;
36335 vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[1]));
36336 vperm = force_reg (V32QImode, vperm);
36338 h = gen_reg_rtx (V32QImode);
36339 op = gen_lowpart (V32QImode, d->op0);
36340 emit_insn (gen_avx2_pshufbv32qi3 (h, op, vperm));
36342 /* Swap the 128-bit lanes of h into hp. */
36343 hp = gen_reg_rtx (V4DImode);
36344 op = gen_lowpart (V4DImode, h);
36345 emit_insn (gen_avx2_permv4di_1 (hp, op, const2_rtx, GEN_INT (3), const0_rtx,
36348 vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[0]));
36349 vperm = force_reg (V32QImode, vperm);
36351 l = gen_reg_rtx (V32QImode);
36352 op = gen_lowpart (V32QImode, d->op0);
36353 emit_insn (gen_avx2_pshufbv32qi3 (l, op, vperm));
36355 op = gen_lowpart (V32QImode, d->target);
36356 emit_insn (gen_iorv32qi3 (op, l, gen_lowpart (V32QImode, hp)));
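/* The vpermq above uses the selector {2,3,0,1}, which exchanges the two
   128-bit halves.  At user level (a sketch assuming AVX2 and
   <immintrin.h>):

     static __m256i
     swap_lanes (__m256i h)
     {
       return _mm256_permute4x64_epi64 (h, 0x4e);    vpermq {2,3,0,1}
     }

   so that the final vpor can combine the in-lane and cross-lane bytes.  */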
36361 /* A subroutine of expand_vec_perm_even_odd_1. Implement extract-even
36362 and extract-odd permutations of two V32QImode and V16QImode operands
36363 with two vpshufb insns, vpor and vpermq. We should have already
36364 failed all two or three instruction sequences. */
36367 expand_vec_perm_vpshufb2_vpermq_even_odd (struct expand_vec_perm_d *d)
36369 rtx rperm[2][32], vperm, l, h, ior, op, m128;
36370 unsigned int i, nelt, eltsz;
36373 || d->op0 == d->op1
36374 || (d->vmode != V32QImode && d->vmode != V16HImode))
36377 for (i = 0; i < d->nelt; ++i)
36378 if ((d->perm[i] ^ (i * 2)) & (3 * d->nelt / 2))
36385 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
36387 /* Generate two permutation masks. In the first permutation mask
36388 the first quarter will contain indexes for the first half
36389 of the op0, the second quarter will contain bit 7 set, third quarter
36390 will contain indexes for the second half of the op0 and the
36391 last quarter bit 7 set. In the second permutation mask
36392 the first quarter will contain bit 7 set, the second quarter
36393 indexes for the first half of the op1, the third quarter bit 7 set
36394 and last quarter indexes for the second half of the op1.
36395 I.e. the first mask e.g. for V32QImode extract even will be:
36396 0, 2, ..., 0xe, -128, ..., -128, 0, 2, ..., 0xe, -128, ..., -128
36397 (all values masked with 0xf except for -128) and second mask
36398 for extract even will be
36399 -128, ..., -128, 0, 2, ..., 0xe, -128, ..., -128, 0, 2, ..., 0xe. */
36400 m128 = GEN_INT (-128);
36401 for (i = 0; i < nelt; ++i)
36403 unsigned j, e = d->perm[i] & (nelt / 2 - 1);
36404 unsigned which = d->perm[i] >= nelt;
36405 unsigned xorv = (i >= nelt / 4 && i < 3 * nelt / 4) ? 24 : 0;
36407 for (j = 0; j < eltsz; ++j)
36409 rperm[which][(i * eltsz + j) ^ xorv] = GEN_INT (e * eltsz + j);
36410 rperm[1 - which][(i * eltsz + j) ^ xorv] = m128;
36414 vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[0]));
36415 vperm = force_reg (V32QImode, vperm);
36417 l = gen_reg_rtx (V32QImode);
36418 op = gen_lowpart (V32QImode, d->op0);
36419 emit_insn (gen_avx2_pshufbv32qi3 (l, op, vperm));
36421 vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[1]));
36422 vperm = force_reg (V32QImode, vperm);
36424 h = gen_reg_rtx (V32QImode);
36425 op = gen_lowpart (V32QImode, d->op1);
36426 emit_insn (gen_avx2_pshufbv32qi3 (h, op, vperm));
36428 ior = gen_reg_rtx (V32QImode);
36429 emit_insn (gen_iorv32qi3 (ior, l, h));
36431 /* Permute the V4DImode quarters using { 0, 2, 1, 3 } permutation. */
36432 op = gen_lowpart (V4DImode, d->target);
36433 ior = gen_lowpart (V4DImode, ior);
36434 emit_insn (gen_avx2_permv4di_1 (op, ior, const0_rtx, const2_rtx,
36435 const1_rtx, GEN_INT (3)));
36440 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
36441 and extract-odd permutations. */
36444 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
36451 t1 = gen_reg_rtx (V4DFmode);
36452 t2 = gen_reg_rtx (V4DFmode);
36454 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
36455 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
36456 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
36458 /* Now an unpck[lh]pd will produce the result required. */
36460 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
36462 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
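/* A user-level sketch of this V4DFmode extract-even case, assuming AVX
   and <immintrin.h> (helper name hypothetical):

     static __m256d
     extract_even_pd (__m256d a, __m256d b)
     {
       __m256d t1 = _mm256_permute2f128_pd (a, b, 0x20);    a0 a1 b0 b1
       __m256d t2 = _mm256_permute2f128_pd (a, b, 0x31);    a2 a3 b2 b3
       return _mm256_unpacklo_pd (t1, t2);                  a0 a2 b0 b2
     }

   Extract-odd is the same with _mm256_unpackhi_pd.  */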
36468 int mask = odd ? 0xdd : 0x88;
36470 t1 = gen_reg_rtx (V8SFmode);
36471 t2 = gen_reg_rtx (V8SFmode);
36472 t3 = gen_reg_rtx (V8SFmode);
36474 /* Shuffle within the 128-bit lanes to produce:
36475 { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }. */
36476 emit_insn (gen_avx_shufps256 (t1, d->op0, d->op1,
36479 /* Shuffle the lanes around to produce:
36480 { 4 6 c e 0 2 8 a } and { 5 7 d f 1 3 9 b }. */
36481 emit_insn (gen_avx_vperm2f128v8sf3 (t2, t1, t1,
36484 /* Shuffle within the 128-bit lanes to produce:
36485 { 0 2 4 6 4 6 0 2 } | { 1 3 5 7 5 7 1 3 }. */
36486 emit_insn (gen_avx_shufps256 (t3, t1, t2, GEN_INT (0x44)));
36488 /* Shuffle within the 128-bit lanes to produce:
36489 { 8 a c e c e 8 a } | { 9 b d f d f 9 b }. */
36490 emit_insn (gen_avx_shufps256 (t2, t1, t2, GEN_INT (0xee)));
36492 /* Shuffle the lanes around to produce:
36493 { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }. */
36494 emit_insn (gen_avx_vperm2f128v8sf3 (d->target, t3, t2,
36503 /* These are always directly implementable by expand_vec_perm_1. */
36504 gcc_unreachable ();
36508 return expand_vec_perm_pshufb2 (d);
36511 /* We need 2*log2(N)-1 operations to achieve odd/even
36512 with interleave. */
36513 t1 = gen_reg_rtx (V8HImode);
36514 t2 = gen_reg_rtx (V8HImode);
36515 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
36516 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
36517 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
36518 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
36520 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
36522 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
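/* Worked count: for V8HImode, 2 * log2 (8) - 1 == 5, which matches the
   five interleave insns emitted above (four plus the final T3).  */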
36529 return expand_vec_perm_pshufb2 (d);
36532 t1 = gen_reg_rtx (V16QImode);
36533 t2 = gen_reg_rtx (V16QImode);
36534 t3 = gen_reg_rtx (V16QImode);
36535 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
36536 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
36537 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
36538 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
36539 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
36540 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
36542 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
36544 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
36551 return expand_vec_perm_vpshufb2_vpermq_even_odd (d);
36556 struct expand_vec_perm_d d_copy = *d;
36557 d_copy.vmode = V4DFmode;
36558 d_copy.target = gen_lowpart (V4DFmode, d->target);
36559 d_copy.op0 = gen_lowpart (V4DFmode, d->op0);
36560 d_copy.op1 = gen_lowpart (V4DFmode, d->op1);
36561 return expand_vec_perm_even_odd_1 (&d_copy, odd);
36564 t1 = gen_reg_rtx (V4DImode);
36565 t2 = gen_reg_rtx (V4DImode);
36567 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
36568 emit_insn (gen_avx2_permv2ti (t1, d->op0, d->op1, GEN_INT (0x20)));
36569 emit_insn (gen_avx2_permv2ti (t2, d->op0, d->op1, GEN_INT (0x31)));
36571 /* Now a vpunpck[lh]qdq will produce the result required. */
36573 t3 = gen_avx2_interleave_highv4di (d->target, t1, t2);
36575 t3 = gen_avx2_interleave_lowv4di (d->target, t1, t2);
36582 struct expand_vec_perm_d d_copy = *d;
36583 d_copy.vmode = V8SFmode;
36584 d_copy.target = gen_lowpart (V8SFmode, d->target);
36585 d_copy.op0 = gen_lowpart (V8SFmode, d->op0);
36586 d_copy.op1 = gen_lowpart (V8SFmode, d->op1);
36587 return expand_vec_perm_even_odd_1 (&d_copy, odd);
36590 t1 = gen_reg_rtx (V8SImode);
36591 t2 = gen_reg_rtx (V8SImode);
36593 /* Shuffle the lanes around into
36594 { 0 1 2 3 8 9 a b } and { 4 5 6 7 c d e f }. */
36595 emit_insn (gen_avx2_permv2ti (gen_lowpart (V4DImode, t1),
36596 gen_lowpart (V4DImode, d->op0),
36597 gen_lowpart (V4DImode, d->op1),
36599 emit_insn (gen_avx2_permv2ti (gen_lowpart (V4DImode, t2),
36600 gen_lowpart (V4DImode, d->op0),
36601 gen_lowpart (V4DImode, d->op1),
36604 /* Swap the 2nd and 3rd positions in each lane into
36605 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
36606 emit_insn (gen_avx2_pshufdv3 (t1, t1,
36607 GEN_INT (2 * 4 + 1 * 16 + 3 * 64)));
36608 emit_insn (gen_avx2_pshufdv3 (t2, t2,
36609 GEN_INT (2 * 4 + 1 * 16 + 3 * 64)));
36611 /* Now a vpunpck[lh]qdq will produce
36612 { 0 2 4 6 8 a c e } resp. { 1 3 5 7 9 b d f }. */
36614 t3 = gen_avx2_interleave_highv4di (gen_lowpart (V4DImode, d->target),
36615 gen_lowpart (V4DImode, t1),
36616 gen_lowpart (V4DImode, t2));
36618 t3 = gen_avx2_interleave_lowv4di (gen_lowpart (V4DImode, d->target),
36619 gen_lowpart (V4DImode, t1),
36620 gen_lowpart (V4DImode, t2));
36625 gcc_unreachable ();
36631 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
36632 extract-even and extract-odd permutations. */
36635 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
36637 unsigned i, odd, nelt = d->nelt;
36640 if (odd != 0 && odd != 1)
36643 for (i = 1; i < nelt; ++i)
36644 if (d->perm[i] != 2 * i + odd)
36647 return expand_vec_perm_even_odd_1 (d, odd);
36650 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
36651 permutations. We assume that expand_vec_perm_1 has already failed. */
36654 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
36656 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
36657 enum machine_mode vmode = d->vmode;
36658 unsigned char perm2[4];
36666 /* These are special-cased in sse.md so that we can optionally
36667 use the vbroadcast instruction. They expand to two insns
36668 if the input happens to be in a register. */
36669 gcc_unreachable ();
36675 /* These are always implementable using standard shuffle patterns. */
36676 gcc_unreachable ();
36680 /* These can be implemented via interleave. We save one insn by
36681 stopping once we have promoted to V4SImode and then use pshufd. */
36684 optab otab = vec_interleave_low_optab;
36688 otab = vec_interleave_high_optab;
36693 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
36694 vmode = get_mode_wider_vector (vmode);
36695 op0 = gen_lowpart (vmode, op0);
36697 while (vmode != V4SImode);
36699 memset (perm2, elt, 4);
36700 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
36708 /* For AVX2 broadcasts of the first element vpbroadcast* or
36709 vpermq should be used by expand_vec_perm_1. */
36710 gcc_assert (!TARGET_AVX2 || d->perm[0]);
36714 gcc_unreachable ();
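/* For instance (a sketch under SSE2 with <emmintrin.h>; with AVX2 an
   element-0 broadcast is already handled by expand_vec_perm_1 via
   vpbroadcastb): broadcasting byte 0 of a V16QImode vector by widening
   interleaves and a final pshufd:

     static __m128i
     broadcast_byte0 (__m128i v)
     {
       v = _mm_unpacklo_epi8 (v, v);          QImode -> HImode
       v = _mm_unpacklo_epi16 (v, v);         HImode -> SImode
       return _mm_shuffle_epi32 (v, 0x00);    pshufd: replicate element 0
     }
*/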
36718 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
36719 broadcast permutations. */
36722 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
36724 unsigned i, elt, nelt = d->nelt;
36726 if (d->op0 != d->op1)
36730 for (i = 1; i < nelt; ++i)
36731 if (d->perm[i] != elt)
36734 return expand_vec_perm_broadcast_1 (d);
36737 /* Implement arbitrary permutation of two V32QImode and V16QImode operands
36738 with 4 vpshufb insns, 2 vpermq and 3 vpor. We should have already failed
36739 all the shorter instruction sequences. */
36742 expand_vec_perm_vpshufb4_vpermq2 (struct expand_vec_perm_d *d)
36744 rtx rperm[4][32], vperm, l[2], h[2], op, m128;
36745 unsigned int i, nelt, eltsz;
36749 || d->op0 == d->op1
36750 || (d->vmode != V32QImode && d->vmode != V16HImode))
36757 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
36759 /* Generate 4 permutation masks. If the required element is within
36760 the same lane, it is shuffled in. If the required element is from the
36761 other lane, force a zero by setting bit 7 in the permutation mask.
36762 The other mask has a non-negative element wherever an element is
36763 requested from the other lane; that element is also moved to the
36764 other lane, so that the result of vpshufb can have the two V2TImode halves
36766 m128 = GEN_INT (-128);
36767 for (i = 0; i < 32; ++i)
36769 rperm[0][i] = m128;
36770 rperm[1][i] = m128;
36771 rperm[2][i] = m128;
36772 rperm[3][i] = m128;
36778 for (i = 0; i < nelt; ++i)
36780 unsigned j, e = d->perm[i] & (nelt / 2 - 1);
36781 unsigned xlane = ((d->perm[i] ^ i) & (nelt / 2)) * eltsz;
36782 unsigned int which = ((d->perm[i] & nelt) ? 2 : 0) + (xlane ? 1 : 0);
36784 for (j = 0; j < eltsz; ++j)
36785 rperm[which][(i * eltsz + j) ^ xlane] = GEN_INT (e * eltsz + j);
36786 used[which] = true;
36789 for (i = 0; i < 2; ++i)
36791 if (!used[2 * i + 1])
36796 vperm = gen_rtx_CONST_VECTOR (V32QImode,
36797 gen_rtvec_v (32, rperm[2 * i + 1]));
36798 vperm = force_reg (V32QImode, vperm);
36799 h[i] = gen_reg_rtx (V32QImode);
36800 op = gen_lowpart (V32QImode, i ? d->op1 : d->op0);
36801 emit_insn (gen_avx2_pshufbv32qi3 (h[i], op, vperm));
36804 /* Swap the 128-bit lanes of h[X]. */
36805 for (i = 0; i < 2; ++i)
36807 if (h[i] == NULL_RTX)
36809 op = gen_reg_rtx (V4DImode);
36810 emit_insn (gen_avx2_permv4di_1 (op, gen_lowpart (V4DImode, h[i]),
36811 const2_rtx, GEN_INT (3), const0_rtx,
36813 h[i] = gen_lowpart (V32QImode, op);
36816 for (i = 0; i < 2; ++i)
36823 vperm = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, rperm[2 * i]));
36824 vperm = force_reg (V32QImode, vperm);
36825 l[i] = gen_reg_rtx (V32QImode);
36826 op = gen_lowpart (V32QImode, i ? d->op1 : d->op0);
36827 emit_insn (gen_avx2_pshufbv32qi3 (l[i], op, vperm));
36830 for (i = 0; i < 2; ++i)
36834 op = gen_reg_rtx (V32QImode);
36835 emit_insn (gen_iorv32qi3 (op, l[i], h[i]));
36842 gcc_assert (l[0] && l[1]);
36843 op = gen_lowpart (V32QImode, d->target);
36844 emit_insn (gen_iorv32qi3 (op, l[0], l[1]));
36848 /* The guts of ix86_expand_vec_perm_const, also used by the ok hook.
36849 With all of the interface bits taken care of, perform the expansion
36850 in D and return true on success. */
36853 ix86_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
36855 /* Try a single instruction expansion. */
36856 if (expand_vec_perm_1 (d))
36859 /* Try sequences of two instructions. */
36861 if (expand_vec_perm_pshuflw_pshufhw (d))
36864 if (expand_vec_perm_palignr (d))
36867 if (expand_vec_perm_interleave2 (d))
36870 if (expand_vec_perm_broadcast (d))
36873 if (expand_vec_perm_vpermq_perm_1 (d))
36876 /* Try sequences of three instructions. */
36878 if (expand_vec_perm_pshufb2 (d))
36881 if (expand_vec_perm_interleave3 (d))
36884 /* Try sequences of four instructions. */
36886 if (expand_vec_perm_vpshufb2_vpermq (d))
36889 if (expand_vec_perm_vpshufb2_vpermq_even_odd (d))
36892 /* ??? Look for narrow permutations whose element orderings would
36893 allow the promotion to a wider mode. */
36895 /* ??? Look for sequences of interleave or a wider permute that place
36896 the data into the correct lanes for a half-vector shuffle like
36897 pshuf[lh]w or vpermilps. */
36899 /* ??? Look for sequences of interleave that produce the desired results.
36900 The combinatorics of punpck[lh] get pretty ugly... */
36902 if (expand_vec_perm_even_odd (d))
36905 /* Even longer sequences. */
36906 if (expand_vec_perm_vpshufb4_vpermq2 (d))
36913 ix86_expand_vec_perm_const (rtx operands[4])
36915 struct expand_vec_perm_d d;
36916 unsigned char perm[MAX_VECT_LEN];
36917 int i, nelt, which;
36920 d.target = operands[0];
36921 d.op0 = operands[1];
36922 d.op1 = operands[2];
36925 d.vmode = GET_MODE (d.target);
36926 gcc_assert (VECTOR_MODE_P (d.vmode));
36927 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
36928 d.testing_p = false;
36930 gcc_assert (GET_CODE (sel) == CONST_VECTOR);
36931 gcc_assert (XVECLEN (sel, 0) == nelt);
36932 gcc_checking_assert (sizeof (d.perm) == sizeof (perm));
36934 for (i = which = 0; i < nelt; ++i)
36936 rtx e = XVECEXP (sel, 0, i);
36937 int ei = INTVAL (e) & (2 * nelt - 1);
36939 which |= (ei < nelt ? 1 : 2);
36950 if (!rtx_equal_p (d.op0, d.op1))
36953 /* The elements of PERM do not suggest that only the first operand
36954 is used, but both operands are identical. Allow easier matching
36955 of the permutation by folding the permutation into the single
36957 for (i = 0; i < nelt; ++i)
36958 if (d.perm[i] >= nelt)
36967 for (i = 0; i < nelt; ++i)
36973 if (ix86_expand_vec_perm_const_1 (&d))
36976 /* If the mask says both arguments are needed, but they are the same,
36977 the above tried to expand with d.op0 == d.op1. If that didn't work,
36978 retry with d.op0 != d.op1 as that is what testing has been done with. */
36979 if (which == 3 && d.op0 == d.op1)
36984 memcpy (d.perm, perm, sizeof (perm));
36985 d.op1 = gen_reg_rtx (d.vmode);
36987 ok = ix86_expand_vec_perm_const_1 (&d);
36988 seq = get_insns ();
36992 emit_move_insn (d.op1, d.op0);
37001 /* Implement targetm.vectorize.vec_perm_const_ok. */
37004 ix86_vectorize_vec_perm_const_ok (enum machine_mode vmode,
37005 const unsigned char *sel)
37007 struct expand_vec_perm_d d;
37008 unsigned int i, nelt, which;
37012 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
37013 d.testing_p = true;
37015 /* Given sufficient ISA support we can just return true here
37016 for selected vector modes. */
37017 if (GET_MODE_SIZE (d.vmode) == 16)
37019 /* All implementable with a single vpperm insn. */
37022 /* All implementable with 2 pshufb + 1 ior. */
37025 /* All implementable with shufpd or unpck[lh]pd. */
37030 /* Extract the values from the vector CST into the permutation
37032 memcpy (d.perm, sel, nelt);
37033 for (i = which = 0; i < nelt; ++i)
37035 unsigned char e = d.perm[i];
37036 gcc_assert (e < 2 * nelt);
37037 which |= (e < nelt ? 1 : 2);
37040 /* For all elements from the second vector, fold them into the first. */
37042 for (i = 0; i < nelt; ++i)
37045 /* Check whether the mask can be applied to the vector type. */
37046 one_vec = (which != 3);
37048 /* Implementable with shufps or pshufd. */
37049 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
37052 /* Otherwise we have to go through the motions and see if we can
37053 figure out how to generate the requested permutation. */
37054 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
37055 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
37057 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
37060 ret = ix86_expand_vec_perm_const_1 (&d);
37067 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
37069 struct expand_vec_perm_d d;
37075 d.vmode = GET_MODE (targ);
37076 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
37077 d.testing_p = false;
37079 for (i = 0; i < nelt; ++i)
37080 d.perm[i] = i * 2 + odd;
37082 /* We'll either be able to implement the permutation directly... */
37083 if (expand_vec_perm_1 (&d))
37086 /* ... or we use the special-case patterns. */
37087 expand_vec_perm_even_odd_1 (&d, odd);
37090 /* Expand an insert into a vector register through pinsr insn.
37091 Return true if successful. */
37094 ix86_expand_pinsr (rtx *operands)
37096 rtx dst = operands[0];
37097 rtx src = operands[3];
37099 unsigned int size = INTVAL (operands[1]);
37100 unsigned int pos = INTVAL (operands[2]);
37102 if (GET_CODE (dst) == SUBREG)
37104 pos += SUBREG_BYTE (dst) * BITS_PER_UNIT;
37105 dst = SUBREG_REG (dst);
37108 if (GET_CODE (src) == SUBREG)
37109 src = SUBREG_REG (src);
37111 switch (GET_MODE (dst))
37118 enum machine_mode srcmode, dstmode;
37119 rtx (*pinsr)(rtx, rtx, rtx, rtx);
37121 srcmode = mode_for_size (size, MODE_INT, 0);
37126 if (!TARGET_SSE4_1)
37128 dstmode = V16QImode;
37129 pinsr = gen_sse4_1_pinsrb;
37135 dstmode = V8HImode;
37136 pinsr = gen_sse2_pinsrw;
37140 if (!TARGET_SSE4_1)
37142 dstmode = V4SImode;
37143 pinsr = gen_sse4_1_pinsrd;
37147 gcc_assert (TARGET_64BIT);
37148 if (!TARGET_SSE4_1)
37150 dstmode = V2DImode;
37151 pinsr = gen_sse4_1_pinsrq;
37158 dst = gen_lowpart (dstmode, dst);
37159 src = gen_lowpart (srcmode, src);
37163 emit_insn (pinsr (dst, dst, src, GEN_INT (1 << pos)));
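/* A user-level equivalent of the SImode case, assuming SSE4.1 and
   <smmintrin.h> (note the intrinsic takes an element index, while the
   insn pattern above takes the 1 << POS mask):

     static __m128i
     insert_dword_at_2 (__m128i dst, int src)
     {
       return _mm_insert_epi32 (dst, src, 2);    pinsrd into element 2
     }
*/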
37172 /* This function returns the calling-ABI-specific va_list type node
37173 for FNDECL. */
37176 ix86_fn_abi_va_list (tree fndecl)
37179 return va_list_type_node;
37180 gcc_assert (fndecl != NULL_TREE);
37182 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
37183 return ms_va_list_type_node;
37185 return sysv_va_list_type_node;
37188 /* Returns the canonical va_list type specified by TYPE. If there
37189 is no valid TYPE provided, it returns NULL_TREE. */
37192 ix86_canonical_va_list_type (tree type)
37196 /* Resolve references and pointers to va_list type. */
37197 if (TREE_CODE (type) == MEM_REF)
37198 type = TREE_TYPE (type);
37199 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
37200 type = TREE_TYPE (type);
37201 else if (POINTER_TYPE_P (type) && TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
37202 type = TREE_TYPE (type);
37204 if (TARGET_64BIT && va_list_type_node != NULL_TREE)
37206 wtype = va_list_type_node;
37207 gcc_assert (wtype != NULL_TREE);
37209 if (TREE_CODE (wtype) == ARRAY_TYPE)
37211 /* If va_list is an array type, the argument may have decayed
37212 to a pointer type, e.g. by being passed to another function.
37213 In that case, unwrap both types so that we can compare the
37214 underlying records. */
37215 if (TREE_CODE (htype) == ARRAY_TYPE
37216 || POINTER_TYPE_P (htype))
37218 wtype = TREE_TYPE (wtype);
37219 htype = TREE_TYPE (htype);
37222 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
37223 return va_list_type_node;
37224 wtype = sysv_va_list_type_node;
37225 gcc_assert (wtype != NULL_TREE);
37227 if (TREE_CODE (wtype) == ARRAY_TYPE)
37229 /* If va_list is an array type, the argument may have decayed
37230 to a pointer type, e.g. by being passed to another function.
37231 In that case, unwrap both types so that we can compare the
37232 underlying records. */
37233 if (TREE_CODE (htype) == ARRAY_TYPE
37234 || POINTER_TYPE_P (htype))
37236 wtype = TREE_TYPE (wtype);
37237 htype = TREE_TYPE (htype);
37240 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
37241 return sysv_va_list_type_node;
37242 wtype = ms_va_list_type_node;
37243 gcc_assert (wtype != NULL_TREE);
37245 if (TREE_CODE (wtype) == ARRAY_TYPE)
37247 /* If va_list is an array type, the argument may have decayed
37248 to a pointer type, e.g. by being passed to another function.
37249 In that case, unwrap both types so that we can compare the
37250 underlying records. */
37251 if (TREE_CODE (htype) == ARRAY_TYPE
37252 || POINTER_TYPE_P (htype))
37254 wtype = TREE_TYPE (wtype);
37255 htype = TREE_TYPE (htype);
37258 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
37259 return ms_va_list_type_node;
37262 return std_canonical_va_list_type (type);
37265 /* Iterate through the target-specific builtin types for va_list.
37266 IDX denotes the iterator, *PTREE is set to the result type of
37267 the va_list builtin, and *PNAME to its internal type.
37268 Returns zero if there is no element for this index, otherwise
37269 IDX should be increased upon the next call.
37270 Note, do not iterate a base builtin's name like __builtin_va_list.
37271 Used from c_common_nodes_and_builtins. */
37274 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
37284 *ptree = ms_va_list_type_node;
37285 *pname = "__builtin_ms_va_list";
37289 *ptree = sysv_va_list_type_node;
37290 *pname = "__builtin_sysv_va_list";
37298 #undef TARGET_SCHED_DISPATCH
37299 #define TARGET_SCHED_DISPATCH has_dispatch
37300 #undef TARGET_SCHED_DISPATCH_DO
37301 #define TARGET_SCHED_DISPATCH_DO do_dispatch
37302 #undef TARGET_SCHED_REASSOCIATION_WIDTH
37303 #define TARGET_SCHED_REASSOCIATION_WIDTH ix86_reassociation_width
37305 /* The size of the dispatch window is the total number of bytes of
37306 object code allowed in a window. */
37307 #define DISPATCH_WINDOW_SIZE 16
37309 /* Number of dispatch windows considered for scheduling. */
37310 #define MAX_DISPATCH_WINDOWS 3
37312 /* Maximum number of instructions in a window. */
37315 /* Maximum number of immediate operands in a window. */
37318 /* Maximum number of immediate bits allowed in a window. */
37319 #define MAX_IMM_SIZE 128
37321 /* Maximum number of 32 bit immediates allowed in a window. */
37322 #define MAX_IMM_32 4
37324 /* Maximum number of 64 bit immediates allowed in a window. */
37325 #define MAX_IMM_64 2
37327 /* Maximum total of loads or prefetches allowed in a window. */
37330 /* Maximum total of stores allowed in a window. */
37331 #define MAX_STORE 1
37337 /* Dispatch groups. Instructions that affect the mix in a dispatch window. */
37338 enum dispatch_group {
37353 /* Number of allowable groups in a dispatch window. It is an array
37354 indexed by dispatch_group enum. 100 is used as a big number,
37355 because the number of these kinds of operations does not have any
37356 effect in a dispatch window, but we need them for other reasons in
37358 static unsigned int num_allowable_groups[disp_last] = {
37359 0, 2, 1, 1, 2, 4, 4, 2, 1, BIG, BIG
37362 char group_name[disp_last + 1][16] = {
37363 "disp_no_group", "disp_load", "disp_store", "disp_load_store",
37364 "disp_prefetch", "disp_imm", "disp_imm_32", "disp_imm_64",
37365 "disp_branch", "disp_cmp", "disp_jcc", "disp_last"
37368 /* Instruction path. */
37371 path_single, /* Single micro op. */
37372 path_double, /* Double micro op. */
37373 path_multi, /* Instructions with more than 2 micro ops. */
37377 /* sched_insn_info defines a window to the instructions scheduled in
37378 the basic block. It contains a pointer to the insn_info table and
37379 the instruction scheduled.
37381 Windows are allocated for each basic block and are linked
37383 typedef struct sched_insn_info_s {
37385 enum dispatch_group group;
37386 enum insn_path path;
37391 /* Linked list of dispatch windows. This is a two-way list of
37392 dispatch windows of a basic block. It contains information about
37393 the number of uops in the window and the total number of
37394 instructions and of bytes in the object code for this dispatch
37396 typedef struct dispatch_windows_s {
37397 int num_insn; /* Number of insn in the window. */
37398 int num_uops; /* Number of uops in the window. */
37399 int window_size; /* Number of bytes in the window. */
37400 int window_num; /* Window number, either 0 or 1. */
37401 int num_imm; /* Number of immediates in an insn. */
37402 int num_imm_32; /* Number of 32 bit immediates in an insn. */
37403 int num_imm_64; /* Number of 64 bit immediates in an insn. */
37404 int imm_size; /* Total immediates in the window. */
37405 int num_loads; /* Total memory loads in the window. */
37406 int num_stores; /* Total memory stores in the window. */
37407 int violation; /* Violation exists in window. */
37408 sched_insn_info *window; /* Pointer to the window. */
37409 struct dispatch_windows_s *next;
37410 struct dispatch_windows_s *prev;
37411 } dispatch_windows;
37413 /* Immediate values used in an insn. */
37414 typedef struct imm_info_s
37421 static dispatch_windows *dispatch_window_list;
37422 static dispatch_windows *dispatch_window_list1;
37424 /* Get dispatch group of insn. */
37426 static enum dispatch_group
37427 get_mem_group (rtx insn)
37429 enum attr_memory memory;
37431 if (INSN_CODE (insn) < 0)
37432 return disp_no_group;
37433 memory = get_attr_memory (insn);
37434 if (memory == MEMORY_STORE)
37437 if (memory == MEMORY_LOAD)
37440 if (memory == MEMORY_BOTH)
37441 return disp_load_store;
37443 return disp_no_group;
37446 /* Return true if insn is a compare instruction. */
37451 enum attr_type type;
37453 type = get_attr_type (insn);
37454 return (type == TYPE_TEST
37455 || type == TYPE_ICMP
37456 || type == TYPE_FCMP
37457 || GET_CODE (PATTERN (insn)) == COMPARE);
37460 /* Return true if a dispatch violation was encountered. */
37463 dispatch_violation (void)
37465 if (dispatch_window_list->next)
37466 return dispatch_window_list->next->violation;
37467 return dispatch_window_list->violation;
37470 /* Return true if insn is a branch instruction. */
37473 is_branch (rtx insn)
37475 return (CALL_P (insn) || JUMP_P (insn));
37478 /* Return true if insn is a prefetch instruction. */
37481 is_prefetch (rtx insn)
37483 return NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == PREFETCH;
37486 /* This function initializes a dispatch window and the list container holding a
37487 pointer to the window. */
37490 init_window (int window_num)
37493 dispatch_windows *new_list;
37495 if (window_num == 0)
37496 new_list = dispatch_window_list;
37498 new_list = dispatch_window_list1;
37500 new_list->num_insn = 0;
37501 new_list->num_uops = 0;
37502 new_list->window_size = 0;
37503 new_list->next = NULL;
37504 new_list->prev = NULL;
37505 new_list->window_num = window_num;
37506 new_list->num_imm = 0;
37507 new_list->num_imm_32 = 0;
37508 new_list->num_imm_64 = 0;
37509 new_list->imm_size = 0;
37510 new_list->num_loads = 0;
37511 new_list->num_stores = 0;
37512 new_list->violation = false;
37514 for (i = 0; i < MAX_INSN; i++)
37516 new_list->window[i].insn = NULL;
37517 new_list->window[i].group = disp_no_group;
37518 new_list->window[i].path = no_path;
37519 new_list->window[i].byte_len = 0;
37520 new_list->window[i].imm_bytes = 0;
37525 /* This function allocates and initializes a dispatch window and the
37526 list container holding a pointer to the window. */
37528 static dispatch_windows *
37529 allocate_window (void)
37531 dispatch_windows *new_list = XNEW (struct dispatch_windows_s);
37532 new_list->window = XNEWVEC (struct sched_insn_info_s, MAX_INSN + 1);
37537 /* This routine initializes the dispatch scheduling information. It
37538 initiates building dispatch scheduler tables and constructs the
37539 first dispatch window. */
37542 init_dispatch_sched (void)
37544 /* Allocate a dispatch list and a window. */
37545 dispatch_window_list = allocate_window ();
37546 dispatch_window_list1 = allocate_window ();
37551 /* This function returns true if a branch is detected. End of a basic block
37552 does not have to be a branch, but here we assume only branches end a
37556 is_end_basic_block (enum dispatch_group group)
37558 return group == disp_branch;
37561 /* This function is called when the end of a window processing is reached. */
37564 process_end_window (void)
37566 gcc_assert (dispatch_window_list->num_insn <= MAX_INSN);
37567 if (dispatch_window_list->next)
37569 gcc_assert (dispatch_window_list1->num_insn <= MAX_INSN);
37570 gcc_assert (dispatch_window_list->window_size
37571 + dispatch_window_list1->window_size <= 48);
37577 /* Allocates a new dispatch window and adds it to WINDOW_LIST.
37578 WINDOW_NUM is either 0 or 1. A maximum of two windows are generated
37579 for 48 bytes of instructions. Note that these windows are not dispatch
37580 windows whose sizes are DISPATCH_WINDOW_SIZE. */
37582 static dispatch_windows *
37583 allocate_next_window (int window_num)
37585 if (window_num == 0)
37587 if (dispatch_window_list->next)
37590 return dispatch_window_list;
37593 dispatch_window_list->next = dispatch_window_list1;
37594 dispatch_window_list1->prev = dispatch_window_list;
37596 return dispatch_window_list1;
37599 /* Increment the number of immediate operands of an instruction. */
37602 find_constant_1 (rtx *in_rtx, imm_info *imm_values)
37607 switch (GET_CODE (*in_rtx))
37612 (imm_values->imm)++;
37613 if (x86_64_immediate_operand (*in_rtx, SImode))
37614 (imm_values->imm32)++;
37616 (imm_values->imm64)++;
37620 (imm_values->imm)++;
37621 (imm_values->imm64)++;
37625 if (LABEL_KIND (*in_rtx) == LABEL_NORMAL)
37627 (imm_values->imm)++;
37628 (imm_values->imm32)++;
37639 /* Compute number of immediate operands of an instruction. */
37642 find_constant (rtx in_rtx, imm_info *imm_values)
37644 for_each_rtx (INSN_P (in_rtx) ? &PATTERN (in_rtx) : &in_rtx,
37645 (rtx_function) find_constant_1, (void *) imm_values);
37648 /* Return total size of immediate operands of an instruction along with number
37649 of corresponding immediate-operands. It initializes its parameters to zero
37650 before calling FIND_CONSTANT.
37651 INSN is the input instruction. IMM is the total of immediates.
37652 IMM32 is the number of 32 bit immediates. IMM64 is the number of 64
37656 get_num_immediates (rtx insn, int *imm, int *imm32, int *imm64)
37658 imm_info imm_values = {0, 0, 0};
37660 find_constant (insn, &imm_values);
37661 *imm = imm_values.imm;
37662 *imm32 = imm_values.imm32;
37663 *imm64 = imm_values.imm64;
37664 return imm_values.imm32 * 4 + imm_values.imm64 * 8;
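/* For example, an insn carrying one 32-bit and one 64-bit immediate
   yields *IMM == 2, *IMM32 == 1, *IMM64 == 1 and a returned size of
   1 * 4 + 1 * 8 == 12 bytes, which is counted against MAX_IMM_SIZE.  */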
37667 /* This function indicates if an operand of an instruction is an
37671 has_immediate (rtx insn)
37673 int num_imm_operand;
37674 int num_imm32_operand;
37675 int num_imm64_operand;
37678 return get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
37679 &num_imm64_operand);
37683 /* Return single or double path for instructions. */
37685 static enum insn_path
37686 get_insn_path (rtx insn)
37688 enum attr_amdfam10_decode path = get_attr_amdfam10_decode (insn);
37690 if ((int)path == 0)
37691 return path_single;
37693 if ((int)path == 1)
37694 return path_double;
37699 /* Return insn dispatch group. */
37701 static enum dispatch_group
37702 get_insn_group (rtx insn)
37704 enum dispatch_group group = get_mem_group (insn);
37708 if (is_branch (insn))
37709 return disp_branch;
37714 if (has_immediate (insn))
37717 if (is_prefetch (insn))
37718 return disp_prefetch;
37720 return disp_no_group;
37723 /* Count number of GROUP restricted instructions in a dispatch
37724 window WINDOW_LIST. */
37727 count_num_restricted (rtx insn, dispatch_windows *window_list)
37729 enum dispatch_group group = get_insn_group (insn);
37731 int num_imm_operand;
37732 int num_imm32_operand;
37733 int num_imm64_operand;
37735 if (group == disp_no_group)
37738 if (group == disp_imm)
37740 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
37741 &num_imm64_operand);
37742 if (window_list->imm_size + imm_size > MAX_IMM_SIZE
37743 || num_imm_operand + window_list->num_imm > MAX_IMM
37744 || (num_imm32_operand > 0
37745 && (window_list->num_imm_32 + num_imm32_operand > MAX_IMM_32
37746 || window_list->num_imm_64 * 2 + num_imm32_operand > MAX_IMM_32))
37747 || (num_imm64_operand > 0
37748 && (window_list->num_imm_64 + num_imm64_operand > MAX_IMM_64
37749 || window_list->num_imm_32 + num_imm64_operand * 2 > MAX_IMM_32))
37750 || (window_list->imm_size + imm_size == MAX_IMM_SIZE
37751 && num_imm64_operand > 0
37752 && ((window_list->num_imm_64 > 0
37753 && window_list->num_insn >= 2)
37754 || window_list->num_insn >= 3)))
37760 if ((group == disp_load_store
37761 && (window_list->num_loads >= MAX_LOAD
37762 || window_list->num_stores >= MAX_STORE))
37763 || ((group == disp_load
37764 || group == disp_prefetch)
37765 && window_list->num_loads >= MAX_LOAD)
37766 || (group == disp_store
37767 && window_list->num_stores >= MAX_STORE))
37773 /* This function returns true if insn satisfies dispatch rules on the
37774 last window scheduled. */
37777 fits_dispatch_window (rtx insn)
37779 dispatch_windows *window_list = dispatch_window_list;
37780 dispatch_windows *window_list_next = dispatch_window_list->next;
37781 unsigned int num_restrict;
37782 enum dispatch_group group = get_insn_group (insn);
37783 enum insn_path path = get_insn_path (insn);
37786 /* Make disp_cmp and disp_jcc get scheduled at the latest. These
37787 instructions should be given the lowest priority in the
37788 scheduling process in Haifa scheduler to make sure they will be
37789 scheduled in the same dispatch window as the reference to them. */
37790 if (group == disp_jcc || group == disp_cmp)
37793 /* Check nonrestricted. */
37794 if (group == disp_no_group || group == disp_branch)
37797 /* Get last dispatch window. */
37798 if (window_list_next)
37799 window_list = window_list_next;
37801 if (window_list->window_num == 1)
37803 sum = window_list->prev->window_size + window_list->window_size;
37806 || (min_insn_size (insn) + sum) >= 48)
37807 /* Window 1 is full. Go for next window. */
37811 num_restrict = count_num_restricted (insn, window_list);
37813 if (num_restrict > num_allowable_groups[group])
37816 /* See if it fits in the first window. */
37817 if (window_list->window_num == 0)
37819 /* The first window should have only single and double path
37821 if (path == path_double
37822 && (window_list->num_uops + 2) > MAX_INSN)
37824 else if (path != path_single)
37830 /* Add an instruction INSN with NUM_UOPS micro-operations to the
37831 dispatch window WINDOW_LIST. */
37834 add_insn_window (rtx insn, dispatch_windows *window_list, int num_uops)
37836 int byte_len = min_insn_size (insn);
37837 int num_insn = window_list->num_insn;
37839 sched_insn_info *window = window_list->window;
37840 enum dispatch_group group = get_insn_group (insn);
37841 enum insn_path path = get_insn_path (insn);
37842 int num_imm_operand;
37843 int num_imm32_operand;
37844 int num_imm64_operand;
37846 if (!window_list->violation && group != disp_cmp
37847 && !fits_dispatch_window (insn))
37848 window_list->violation = true;
37850 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
37851 &num_imm64_operand);
37853 /* Initialize window with new instruction. */
37854 window[num_insn].insn = insn;
37855 window[num_insn].byte_len = byte_len;
37856 window[num_insn].group = group;
37857 window[num_insn].path = path;
37858 window[num_insn].imm_bytes = imm_size;
37860 window_list->window_size += byte_len;
37861 window_list->num_insn = num_insn + 1;
37862 window_list->num_uops = window_list->num_uops + num_uops;
37863 window_list->imm_size += imm_size;
37864 window_list->num_imm += num_imm_operand;
37865 window_list->num_imm_32 += num_imm32_operand;
37866 window_list->num_imm_64 += num_imm64_operand;
37868 if (group == disp_store)
37869 window_list->num_stores += 1;
37870 else if (group == disp_load
37871 || group == disp_prefetch)
37872 window_list->num_loads += 1;
37873 else if (group == disp_load_store)
37875 window_list->num_stores += 1;
37876 window_list->num_loads += 1;
37880 /* Adds a scheduled instruction, INSN, to the current dispatch window.
37881 If the total bytes of instructions or the number of instructions in
37882 the window exceed the allowable limits, it allocates a new window. */
37885 add_to_dispatch_window (rtx insn)
37888 dispatch_windows *window_list;
37889 dispatch_windows *next_list;
37890 dispatch_windows *window0_list;
37891 enum insn_path path;
37892 enum dispatch_group insn_group;
37900 if (INSN_CODE (insn) < 0)
37903 byte_len = min_insn_size (insn);
37904 window_list = dispatch_window_list;
37905 next_list = window_list->next;
37906 path = get_insn_path (insn);
37907 insn_group = get_insn_group (insn);
37909 /* Get the last dispatch window. */
37911 window_list = dispatch_window_list->next;
37913 if (path == path_single)
37915 else if (path == path_double)
37918 insn_num_uops = (int) path;
37920 /* If the current window is full, get a new window.
37921 Window number zero is full if MAX_INSN uops are scheduled in it.
37922 Window number one is full if window zero's bytes plus window
37923 one's bytes equal 32, or if adding the bytes of the new
37924 instruction to the total makes it greater than 48, or if it
37925 already has MAX_INSN instructions in it. */
37926 num_insn = window_list->num_insn;
37927 num_uops = window_list->num_uops;
37928 window_num = window_list->window_num;
37929 insn_fits = fits_dispatch_window (insn);
37931 if (num_insn >= MAX_INSN
37932 || num_uops + insn_num_uops > MAX_INSN
37935 window_num = ~window_num & 1;
37936 window_list = allocate_next_window (window_num);
37939 if (window_num == 0)
37941 add_insn_window (insn, window_list, insn_num_uops);
37942 if (window_list->num_insn >= MAX_INSN
37943 && insn_group == disp_branch)
37945 process_end_window ();
37949 else if (window_num == 1)
37951 window0_list = window_list->prev;
37952 sum = window0_list->window_size + window_list->window_size;
37954 || (byte_len + sum) >= 48)
37956 process_end_window ();
37957 window_list = dispatch_window_list;
37960 add_insn_window (insn, window_list, insn_num_uops);
37963 gcc_unreachable ();
37965 if (is_end_basic_block (insn_group))
37967 /* End of basic block is reached; do end-basic-block processing. */
37968 process_end_window ();
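/* Worked example of the byte accounting above: if window 0 holds 30
   bytes and window 1 holds 14, then a 5-byte insn pushes the total to
   49 >= 48, so both windows are processed and a fresh window 0 is
   started for it.  */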
37973 /* Print the dispatch window, WINDOW_NUM, to FILE. */
37975 DEBUG_FUNCTION static void
37976 debug_dispatch_window_file (FILE *file, int window_num)
37978 dispatch_windows *list;
37981 if (window_num == 0)
37982 list = dispatch_window_list;
37984 list = dispatch_window_list1;
37986 fprintf (file, "Window #%d:\n", list->window_num);
37987 fprintf (file, " num_insn = %d, num_uops = %d, window_size = %d\n",
37988 list->num_insn, list->num_uops, list->window_size);
37989 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
37990 list->num_imm, list->num_imm_32, list->num_imm_64, list->imm_size);
37992 fprintf (file, " num_loads = %d, num_stores = %d\n", list->num_loads,
37994 fprintf (file, " insn info:\n");
37996 for (i = 0; i < MAX_INSN; i++)
37998 if (!list->window[i].insn)
38000 fprintf (file, " group[%d] = %s, insn[%d] = %p, path[%d] = %d byte_len[%d] = %d, imm_bytes[%d] = %d\n",
38001 i, group_name[list->window[i].group],
38002 i, (void *)list->window[i].insn,
38003 i, list->window[i].path,
38004 i, list->window[i].byte_len,
38005 i, list->window[i].imm_bytes);
38009 /* Print to stdout a dispatch window. */
38011 DEBUG_FUNCTION void
38012 debug_dispatch_window (int window_num)
38014 debug_dispatch_window_file (stdout, window_num);
38017 /* Print INSN dispatch information to FILE. */
38019 DEBUG_FUNCTION static void
38020 debug_insn_dispatch_info_file (FILE *file, rtx insn)
38023 enum insn_path path;
38024 enum dispatch_group group;
38026 int num_imm_operand;
38027 int num_imm32_operand;
38028 int num_imm64_operand;
38030 if (INSN_CODE (insn) < 0)
38033 byte_len = min_insn_size (insn);
38034 path = get_insn_path (insn);
38035 group = get_insn_group (insn);
38036 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
38037 &num_imm64_operand);
38039 fprintf (file, " insn info:\n");
38040 fprintf (file, " group = %s, path = %d, byte_len = %d\n",
38041 group_name[group], path, byte_len);
38042 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
38043 num_imm_operand, num_imm32_operand, num_imm64_operand, imm_size);
38046 /* Print to STDOUT the status of the ready list with respect to
38047 dispatch windows. */
38049 DEBUG_FUNCTION void
38050 debug_ready_dispatch (void)
38053 int no_ready = number_in_ready ();
38055 fprintf (stdout, "Number of ready: %d\n", no_ready);
38057 for (i = 0; i < no_ready; i++)
38058 debug_insn_dispatch_info_file (stdout, get_ready_element (i));
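/* Editor's note: the DEBUG_FUNCTION helpers above are not called by the
   compiler itself; they are intended to be invoked by hand from a
   debugger while cc1 is stopped inside the scheduler, e.g. (hypothetical
   gdb session):

     (gdb) call debug_dispatch_window (0)
     (gdb) call debug_ready_dispatch ()  */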
/* This routine is the driver of the dispatch scheduler.  */

static void
do_dispatch (rtx insn, int mode)
{
  if (mode == DISPATCH_INIT)
    init_dispatch_sched ();
  else if (mode == ADD_TO_DISPATCH_WINDOW)
    add_to_dispatch_window (insn);
}
/* Return TRUE if the query ACTION about dispatch scheduling holds
   for INSN.  */

static bool
has_dispatch (rtx insn, int action)
{
  if ((ix86_tune == PROCESSOR_BDVER1 || ix86_tune == PROCESSOR_BDVER2)
      && flag_dispatch_scheduler)
    switch (action)
      {
      default:
	return false;

      case IS_DISPATCH_ON:
	return true;

      case IS_CMP:
	return is_cmp (insn);

      case DISPATCH_VIOLATION:
	return dispatch_violation ();

      case FITS_DISPATCH_WINDOW:
	return fits_dispatch_window (insn);
      }

  return false;
}
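/* Editor's sketch (not compiled): how the generic scheduler is expected
   to drive the two entry points above through the TARGET_SCHED_DISPATCH
   and TARGET_SCHED_DISPATCH_DO hooks.  Roughly, haifa-sched.c first asks
   whether dispatch scheduling is active at all, then feeds each issued
   insn into the current window; the exact call sites may differ.  */
#if 0
if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
  targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
#endif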
/* Implementation of the reassociation_width target hook, used by the
   reassoc phase to identify the parallelism level of a reassociated
   tree.  The statement's tree_code is passed in OPC and the arguments'
   type in MODE.

   Currently parallel reassociation is enabled for Atom processors only,
   and we set the reassociation width to 2 because Atom may issue up to
   2 instructions per cycle.

   The return value should be adjusted if parallel reassociation is
   enabled for other processors.  */

static int
ix86_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
			  enum machine_mode mode)
{
  int res = 1;

  if (INTEGRAL_MODE_P (mode) && TARGET_REASSOC_INT_TO_PARALLEL)
    res = 2;
  else if (FLOAT_MODE_P (mode) && TARGET_REASSOC_FP_TO_PARALLEL)
    res = 2;

  return res;
}
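/* Editor's note: with a width of 2, the reassoc pass may split a serial
   reduction such as

     t = a + b;  t = t + c;  t = t + d;

   into two independent chains that Atom can issue in parallel:

     t1 = a + b;  t2 = c + d;  t = t1 + t2;  */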
/* ??? No autovectorization into MMX or 3DNOW until we can reliably
   place emms and femms instructions.  */

static enum machine_mode
ix86_preferred_simd_mode (enum machine_mode mode)
{
  if (!TARGET_SSE)
    return word_mode;

  switch (mode)
    {
    case QImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V32QImode : V16QImode;
    case HImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V16HImode : V8HImode;
    case SImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V8SImode : V4SImode;
    case DImode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V4DImode : V2DImode;

    case SFmode:
      return (TARGET_AVX && !TARGET_PREFER_AVX128) ? V8SFmode : V4SFmode;

    case DFmode:
      if (!TARGET_VECTORIZE_DOUBLE)
	return word_mode;
      else if (TARGET_AVX && !TARGET_PREFER_AVX128)
	return V4DFmode;
      else if (TARGET_SSE2)
	return V2DFmode;
      return word_mode;

    default:
      return word_mode;
    }
}
/* If AVX is enabled then try vectorizing with both 256bit and 128bit
   vectors.  */

static unsigned int
ix86_autovectorize_vector_sizes (void)
{
  return (TARGET_AVX && !TARGET_PREFER_AVX128) ? 32 | 16 : 0;
}
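/* Editor's note: the return value is a bitmask of vector sizes, in
   bytes, for the vectorizer to try; 32 | 16 means "try 256-bit vectors
   and fall back to 128-bit", while 0 means "use only the preferred
   SIMD mode reported above".  */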
/* Initialize the GCC target structure.  */
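/* Editor's note: each #undef/#define pair below overrides the default
   hook definition supplied by target-def.h; TARGET_INITIALIZER then
   collects all of them into the targetm vector defined at the end of
   this file.  */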
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
# undef TARGET_MERGE_DECL_ATTRIBUTES
# define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL ix86_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  ix86_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_TM_LOAD
#define TARGET_VECTORIZE_BUILTIN_TM_LOAD ix86_builtin_tm_load

#undef TARGET_VECTORIZE_BUILTIN_TM_STORE
#define TARGET_VECTORIZE_BUILTIN_TM_STORE ix86_builtin_tm_store

#undef TARGET_VECTORIZE_BUILTIN_GATHER
#define TARGET_VECTORIZE_BUILTIN_GATHER ix86_vectorize_builtin_gather

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP ASM_BYTE

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif
#undef TARGET_PROFILE_BEFORE_PROLOGUE
#define TARGET_PROFILE_BEFORE_PROLOGUE ix86_profile_before_prologue

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND ix86_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA i386_asm_output_addr_const_extra

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL ix86_sched_init_global
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
#ifdef TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE ix86_option_override

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_ENUM_VA_LIST_P
#define TARGET_ENUM_VA_LIST_P ix86_enum_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG ix86_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY ix86_function_arg_boundary
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_UPDATE_STACK_BOUNDARY
#define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
#undef TARGET_GET_DRAP_RTX
#define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN ix86_static_chain
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS ix86_return_pops_args

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif
#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type
#ifndef TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
#endif
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE ix86_promote_function_mode

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS ix86_class_max_nregs

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS ix86_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS ix86_preferred_output_reload_class
#undef TARGET_CLASS_LIKELY_SPILLED_P
#define TARGET_CLASS_LIKELY_SPILLED_P ix86_class_likely_spilled_p

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  ix86_builtin_vectorization_cost
#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK \
  ix86_vectorize_vec_perm_const_ok
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  ix86_preferred_simd_mode
#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
  ix86_autovectorize_vector_sizes

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P ix86_legitimate_constant_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ix86_can_eliminate

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY ix86_live_on_entry

#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END ix86_code_end

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage
#if TARGET_MACHO
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS darwin_rename_builtins
#endif
struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-i386.h"