/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cgraph.h"
#include "gimple.h"
#include "dwarf2.h"
#include "df.h"
#include "tm-constrs.h"
#include "params.h"
#include "cselib.h"
#include "debug.h"
#include "sched-int.h"
#include "sbitmap.h"
#include "fibheap.h"
#include "opts.h"
#include "diagnostic.h"
enum upper_128bits_state
{
  unknown = 0,
  unused,
  used
};
typedef struct block_info_def
{
  /* State of the upper 128bits of AVX registers at exit.  */
  enum upper_128bits_state state;
  /* TRUE if state of the upper 128bits of AVX registers is unchanged
     in this block.  */
  bool unchanged;
  /* TRUE if block has been processed.  */
  bool processed;
  /* TRUE if block has been scanned.  */
  bool scanned;
  /* Previous state of the upper 128bits of AVX registers at entry.  */
  enum upper_128bits_state prev;
} *block_info;

#define BLOCK_INFO(B)   ((block_info) (B)->aux)
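
/* Editorial note, not from the original source: BLOCK_INFO reaches the
   per-block state through the generic basic_block aux pointer; the
   backing storage is allocated by the alloc_aux_for_blocks call in
   move_or_delete_vzeroupper below and released by free_aux_for_blocks
   at the end of that pass.  */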
enum call_avx256_state
{
  /* Callee returns 256bit AVX register.  */
  callee_return_avx256 = -1,
  /* Callee returns and passes 256bit AVX register.  */
  callee_return_pass_avx256,
  /* Callee passes 256bit AVX register.  */
  callee_pass_avx256,
  /* Callee neither returns nor passes 256bit AVX register, or no
     256bit AVX register in function return.  */
  call_no_avx256,
  /* vzeroupper intrinsic.  */
  vzeroupper_intrinsic
};
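
/* Editorial note, not from the original source: these values travel as
   the operand of the UNSPECV_VZEROUPPER pattern emitted around calls,
   which is why move_or_delete_vzeroupper_2 below can recover them with
   INTVAL (XVECEXP (pat, 0, 0)).  A call whose return value lives in a
   256bit AVX register, for instance, is tagged callee_return_avx256
   (-1).  */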
/* Check if a 256bit AVX register is referenced in stores.  */

static void
check_avx256_stores (rtx dest, const_rtx set, void *data)
{
  if ((REG_P (dest)
       && VALID_AVX256_REG_MODE (GET_MODE (dest)))
      || (GET_CODE (set) == SET
          && REG_P (SET_SRC (set))
          && VALID_AVX256_REG_MODE (GET_MODE (SET_SRC (set)))))
    {
      enum upper_128bits_state *state
        = (enum upper_128bits_state *) data;
      *state = used;
    }
}
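
/* Editorial usage sketch, not from the original source: this is a
   note_stores callback, invoked as in

     enum upper_128bits_state state = unused;
     note_stores (PATTERN (insn), check_avx256_stores, &state);

   after which STATE is `used' iff some store in the insn reads from or
   writes to a 256bit AVX register.  */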
/* Helper function for move_or_delete_vzeroupper_1.  Look for vzeroupper
   in basic block BB.  Delete it if upper 128bit AVX registers are
   unused.  If it isn't deleted, move it to just before a jump insn.

   STATE is the state of the upper 128bits of AVX registers at entry.  */

static void
move_or_delete_vzeroupper_2 (basic_block bb,
                             enum upper_128bits_state state)
{
  rtx insn, bb_end;
  rtx vzeroupper_insn = NULL_RTX;
  rtx pat;
  int avx256;
  bool unchanged;

  if (BLOCK_INFO (bb)->unchanged)
    {
      if (dump_file)
        fprintf (dump_file, " [bb %i] unchanged: upper 128bits: %d\n",
                 bb->index, state);

      BLOCK_INFO (bb)->state = state;
      return;
    }

  if (BLOCK_INFO (bb)->scanned && BLOCK_INFO (bb)->prev == state)
    {
      if (dump_file)
        fprintf (dump_file, " [bb %i] scanned: upper 128bits: %d\n",
                 bb->index, BLOCK_INFO (bb)->state);
      return;
    }

  BLOCK_INFO (bb)->prev = state;

  if (dump_file)
    fprintf (dump_file, " [bb %i] entry: upper 128bits: %d\n",
             bb->index, state);

  unchanged = true;

  /* BB_END changes when it is deleted.  */
  bb_end = BB_END (bb);
  insn = BB_HEAD (bb);
  while (insn != bb_end)
    {
      insn = NEXT_INSN (insn);

      if (!NONDEBUG_INSN_P (insn))
        continue;

      /* Move vzeroupper before jump/call.  */
      if (JUMP_P (insn) || CALL_P (insn))
        {
          if (!vzeroupper_insn)
            continue;

          if (PREV_INSN (insn) != vzeroupper_insn)
            {
              if (dump_file)
                {
                  fprintf (dump_file, "Move vzeroupper after:\n");
                  print_rtl_single (dump_file, PREV_INSN (insn));
                  fprintf (dump_file, "before:\n");
                  print_rtl_single (dump_file, insn);
                }
              reorder_insns_nobb (vzeroupper_insn, vzeroupper_insn,
                                  PREV_INSN (insn));
            }
          vzeroupper_insn = NULL_RTX;
          continue;
        }

      pat = PATTERN (insn);

      /* Check insn for vzeroupper intrinsic.  */
      if (GET_CODE (pat) == UNSPEC_VOLATILE
          && XINT (pat, 1) == UNSPECV_VZEROUPPER)
        {
          if (dump_file)
            {
              /* Found vzeroupper intrinsic.  */
              fprintf (dump_file, "Found vzeroupper:\n");
              print_rtl_single (dump_file, insn);
            }
        }
      else
        {
          /* Check insn for vzeroall intrinsic.  */
          if (GET_CODE (pat) == PARALLEL
              && GET_CODE (XVECEXP (pat, 0, 0)) == UNSPEC_VOLATILE
              && XINT (XVECEXP (pat, 0, 0), 1) == UNSPECV_VZEROALL)
            {
              state = unused;
              unchanged = false;

              /* Delete pending vzeroupper insertion.  */
              if (vzeroupper_insn)
                {
                  delete_insn (vzeroupper_insn);
                  vzeroupper_insn = NULL_RTX;
                }
            }
          else if (state != used)
            {
              note_stores (pat, check_avx256_stores, &state);
              if (state == used)
                unchanged = false;
            }
          continue;
        }

      /* Process vzeroupper intrinsic.  */
      avx256 = INTVAL (XVECEXP (pat, 0, 0));

      if (state == unused)
        {
          /* Since the upper 128bits are cleared, callee must not pass
             256bit AVX register.  We only need to check if callee
             returns 256bit AVX register.  */
          if (avx256 == callee_return_avx256)
            {
              state = used;
              unchanged = false;
            }

          /* Remove unnecessary vzeroupper since upper 128bits are
             cleared.  */
          if (dump_file)
            {
              fprintf (dump_file, "Delete redundant vzeroupper:\n");
              print_rtl_single (dump_file, insn);
            }
          delete_insn (insn);
        }
      else
        {
          /* Set state to UNUSED if callee doesn't return 256bit AVX
             register.  */
          if (avx256 != callee_return_pass_avx256)
            state = unused;

          if (avx256 == callee_return_pass_avx256
              || avx256 == callee_pass_avx256)
            {
              /* Must remove vzeroupper since callee passes in 256bit
                 AVX register.  */
              if (dump_file)
                {
                  fprintf (dump_file, "Delete callee pass vzeroupper:\n");
                  print_rtl_single (dump_file, insn);
                }
              delete_insn (insn);
            }
          else
            {
              vzeroupper_insn = insn;
              unchanged = false;
            }
        }
    }

  BLOCK_INFO (bb)->state = state;
  BLOCK_INFO (bb)->unchanged = unchanged;
  BLOCK_INFO (bb)->scanned = true;

  if (dump_file)
    fprintf (dump_file, " [bb %i] exit: %s: upper 128bits: %d\n",
             bb->index, unchanged ? "unchanged" : "changed",
             state);
}
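
/* Editorial example of the transformation above, not from the original
   source (register choices are illustrative only):

        vmovaps %ymm0, (%rax)           vmovaps %ymm0, (%rax)
        vzeroupper                      ...128bit SSE code...
        ...128bit SSE code...    -->    vzeroupper
        call    foo                     call    foo

   The vzeroupper survives but is pushed down to just before the call;
   had the upper 128bits been known to be unused at that point, it
   would instead have been deleted outright.  */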
/* Helper function for move_or_delete_vzeroupper.  Process vzeroupper
   in BLOCK and check its predecessor blocks.  Treat UNKNOWN state
   as UNUSED if UNKNOWN_IS_UNUSED is true.  Return TRUE if the exit
   state is changed.  */

static bool
move_or_delete_vzeroupper_1 (basic_block block, bool unknown_is_unused)
{
  edge e;
  edge_iterator ei;
  enum upper_128bits_state state, old_state, new_state;
  bool seen_unknown;

  if (dump_file)
    fprintf (dump_file, " Process [bb %i]: status: %d\n",
             block->index, BLOCK_INFO (block)->processed);

  if (BLOCK_INFO (block)->processed)
    return false;

  state = unused;

  /* Check all predecessor edges of this block.  */
  seen_unknown = false;
  FOR_EACH_EDGE (e, ei, block->preds)
    {
      if (e->src == block)
        continue;
      switch (BLOCK_INFO (e->src)->state)
        {
        case unknown:
          if (!unknown_is_unused)
            seen_unknown = true;
          /* FALLTHRU */
        case unused:
          break;
        case used:
          state = used;
          goto done;
        }
    }

  if (seen_unknown)
    state = unknown;

done:
  old_state = BLOCK_INFO (block)->state;
  move_or_delete_vzeroupper_2 (block, state);
  new_state = BLOCK_INFO (block)->state;

  if (state != unknown || new_state == used)
    BLOCK_INFO (block)->processed = true;

  /* Need to rescan if the upper 128bits of AVX registers are changed
     to USED at exit.  */
  if (new_state != old_state)
    {
      if (new_state == used)
        cfun->machine->rescan_vzeroupper_p = 1;
      return true;
    }
  else
    return false;
}
/* Go through the instruction stream looking for vzeroupper.  Delete
   it if upper 128bit AVX registers are unused.  If it isn't deleted,
   move it to just before a jump insn.  */

static void
move_or_delete_vzeroupper (void)
{
  edge e;
  edge_iterator ei;
  basic_block bb;
  fibheap_t worklist, pending, fibheap_swap;
  sbitmap visited, in_worklist, in_pending, sbitmap_swap;
  int *bb_order;
  int *rc_order;
  int i;

  /* Set up block info for each basic block.  */
  alloc_aux_for_blocks (sizeof (struct block_info_def));

  /* Process outgoing edges of entry point.  */
  if (dump_file)
    fprintf (dump_file, "Process outgoing edges of entry point\n");

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    {
      move_or_delete_vzeroupper_2 (e->dest,
                                   cfun->machine->caller_pass_avx256_p
                                   ? used : unused);
      BLOCK_INFO (e->dest)->processed = true;
    }

  /* Compute reverse completion order of depth first search of the CFG
     so that the data-flow runs faster.  */
  rc_order = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
  bb_order = XNEWVEC (int, last_basic_block);
  pre_and_rev_post_order_compute (NULL, rc_order, false);
  for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
    bb_order[rc_order[i]] = i;
  free (rc_order);

  worklist = fibheap_new ();
  pending = fibheap_new ();
  visited = sbitmap_alloc (last_basic_block);
  in_worklist = sbitmap_alloc (last_basic_block);
  in_pending = sbitmap_alloc (last_basic_block);
  sbitmap_zero (in_worklist);

  /* Don't check outgoing edges of entry point.  */
  sbitmap_ones (in_pending);
  FOR_EACH_BB (bb)
    if (BLOCK_INFO (bb)->processed)
      RESET_BIT (in_pending, bb->index);
    else
      {
        move_or_delete_vzeroupper_1 (bb, false);
        fibheap_insert (pending, bb_order[bb->index], bb);
      }

  if (dump_file)
    fprintf (dump_file, "Check remaining basic blocks\n");

  while (!fibheap_empty (pending))
    {
      fibheap_swap = pending;
      pending = worklist;
      worklist = fibheap_swap;
      sbitmap_swap = in_pending;
      in_pending = in_worklist;
      in_worklist = sbitmap_swap;

      sbitmap_zero (visited);

      cfun->machine->rescan_vzeroupper_p = 0;

      while (!fibheap_empty (worklist))
        {
          bb = (basic_block) fibheap_extract_min (worklist);
          RESET_BIT (in_worklist, bb->index);
          gcc_assert (!TEST_BIT (visited, bb->index));
          if (!TEST_BIT (visited, bb->index))
            {
              edge_iterator ei;

              SET_BIT (visited, bb->index);

              if (move_or_delete_vzeroupper_1 (bb, false))
                FOR_EACH_EDGE (e, ei, bb->succs)
                  {
                    if (e->dest == EXIT_BLOCK_PTR
                        || BLOCK_INFO (e->dest)->processed)
                      continue;

                    if (TEST_BIT (visited, e->dest->index))
                      {
                        if (!TEST_BIT (in_pending, e->dest->index))
                          {
                            /* Send E->DEST to next round.  */
                            SET_BIT (in_pending, e->dest->index);
                            fibheap_insert (pending,
                                            bb_order[e->dest->index],
                                            e->dest);
                          }
                      }
                    else if (!TEST_BIT (in_worklist, e->dest->index))
                      {
                        /* Add E->DEST to current round.  */
                        SET_BIT (in_worklist, e->dest->index);
                        fibheap_insert (worklist, bb_order[e->dest->index],
                                        e->dest);
                      }
                  }
            }
        }

      if (!cfun->machine->rescan_vzeroupper_p)
        break;
    }

  free (bb_order);
  fibheap_delete (worklist);
  fibheap_delete (pending);
  sbitmap_free (visited);
  sbitmap_free (in_worklist);
  sbitmap_free (in_pending);

  if (dump_file)
    fprintf (dump_file, "Process remaining basic blocks\n");

  FOR_EACH_BB (bb)
    move_or_delete_vzeroupper_1 (bb, true);

  free_aux_for_blocks ();
}
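
/* Editorial note on the iteration scheme above, not from the original
   source: PENDING and WORKLIST ping-pong between rounds.  Each round
   drains WORKLIST in reverse completion order (a cheap forward
   dataflow), and a block whose exit state changed pushes its
   unprocessed successors either into the current round or, if already
   visited this round, into PENDING for the next one.  Iteration stops
   as soon as a round completes without any block flipping to `used'
   (rescan_vzeroupper_p stays 0); the final FOR_EACH_BB pass then
   handles whatever is left, treating still-unknown predecessor states
   as unused.  */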
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)				\
  ((mode) == QImode ? 0					\
   : (mode) == HImode ? 1				\
   : (mode) == SImode ? 2				\
   : (mode) == DImode ? 3				\
   : 4)
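
/* Editorial usage sketch, not from the original source: the cost
   tables below index their multiply and divide arrays with this macro,
   e.g.

     ix86_cost->mult_init[MODE_INDEX (SImode)]

   yields the SImode multiply startup cost; any mode wider than DImode
   lands in the catch-all slot 4 ("other").  */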
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
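
/* Editorial worked example, not from the original source: with
   COSTS_N_INSNS (N) == (N) * 4 and an addition being 2 bytes,
   COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1), so the byte counts in
   the size table below sit on the same scale as the cycle counts used
   by the speed tables.  */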
#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}

const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),			/* cost of an add instruction */
  COSTS_N_BYTES (3),			/* cost of a lea instruction */
  COSTS_N_BYTES (2),			/* variable shift costs */
  COSTS_N_BYTES (3),			/* constant shift costs */
  {COSTS_N_BYTES (3),			/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),			/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),			/* HI */
   COSTS_N_BYTES (3),			/* SI */
   COSTS_N_BYTES (3),			/* DI */
   COSTS_N_BYTES (5)},			/* other */
  COSTS_N_BYTES (3),			/* cost of movsx */
  COSTS_N_BYTES (3),			/* cost of movzx */
  0,					/* "large" insn */
  2,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {2, 2, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 2},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {2, 2, 2},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  3,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {3, 3},				/* cost of storing MMX registers
					   in SImode and DImode */
  3,					/* cost of moving SSE register */
  {3, 3, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {3, 3, 3},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache */
  0,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_BYTES (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),			/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),			/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),			/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  1,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  1,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
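
/* Editorial note, not from the original source: the table above is
   selected when optimizing for size, while the per-CPU tables that
   follow are selected by -mtune; the active table is reached through
   the ix86_cost pointer, e.g. ix86_cost->add for the relative cost of
   an addition under the current tuning.  */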

/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (6),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),			/* HI */
   COSTS_N_INSNS (6),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  COSTS_N_INSNS (1),			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (23),			/* SI */
   COSTS_N_INSNS (23),			/* DI */
   COSTS_N_INSNS (23)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  3,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache */
  0,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (23),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (12),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),			/* HI */
   COSTS_N_INSNS (12),			/* SI */
   COSTS_N_INSNS (12),			/* DI */
   COSTS_N_INSNS (12)},			/* other */
  1,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),			/* HI */
   COSTS_N_INSNS (40),			/* SI */
   COSTS_N_INSNS (40),			/* DI */
   COSTS_N_INSNS (40)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  3,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  4,					/* size of l1 cache.  486 has 8kB cache
					   shared for code and data, so 4kB is
					   not really precise.  */
  4,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),			/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (11),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),			/* HI */
   COSTS_N_INSNS (11),			/* SI */
   COSTS_N_INSNS (11),			/* DI */
   COSTS_N_INSNS (11)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),			/* HI */
   COSTS_N_INSNS (25),			/* SI */
   COSTS_N_INSNS (25),			/* DI */
   COSTS_N_INSNS (25)},			/* other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* MOVE_RATIO */
  6,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  8,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  8,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (4)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),			/* HI */
   COSTS_N_INSNS (17),			/* SI */
   COSTS_N_INSNS (17),			/* DI */
   COSTS_N_INSNS (17)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache */
  32,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win, for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb has apparently more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (2),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (7),			/* SI */
   COSTS_N_INSNS (7),			/* DI */
   COSTS_N_INSNS (7)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/* HI */
   COSTS_N_INSNS (39),			/* SI */
   COSTS_N_INSNS (39),			/* DI */
   COSTS_N_INSNS (39)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* MOVE_RATIO */
  1,					/* cost for loading QImode using movzbl */
  {1, 1, 1},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {1, 1, 1},				/* cost of storing integer registers */
  1,					/* cost of reg,reg fld/fst */
  {1, 1, 1},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 6, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  1,					/* cost of moving MMX register */
  {1, 1},				/* cost of loading MMX registers
					   in SImode and DImode */
  {1, 1},				/* cost of storing MMX registers
					   in SImode and DImode */
  1,					/* cost of moving SSE register */
  {1, 1, 1},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {1, 1, 1},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  1,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  128,					/* size of l2 cache.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (3),			/* DI */
   COSTS_N_INSNS (3)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),			/* HI */
   COSTS_N_INSNS (18),			/* SI */
   COSTS_N_INSNS (18),			/* DI */
   COSTS_N_INSNS (18)},			/* other */
  COSTS_N_INSNS (2),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* MOVE_RATIO */
  3,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  6,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  32,					/* size of l2 cache.  Some models
					   have integrated l2 cache, but
					   optimizing for k6 is not important
					   enough to worry about that.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (5),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),			/* HI */
   COSTS_N_INSNS (5),			/* SI */
   COSTS_N_INSNS (5),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  5,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 3, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use loop.  For large blocks, libcall can
     do non-temporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  5,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  3,					/* vec_unalign_load_cost.  */
  3,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  2,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do non-temporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs bdver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {5, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {5, 5, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 4},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  16,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),			/* cost of FSQRT instruction.  */

  /* BDVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do non-temporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  6,					/* scalar_stmt_cost.  */
  4,					/* scalar load_cost.  */
  4,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  4,					/* vec_align_load_cost.  */
  4,					/* vec_unalign_load_cost.  */
  4,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs bdver2_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (4),			/* SI */
   COSTS_N_INSNS (6),			/* DI */
   COSTS_N_INSNS (6)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {5, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {5, 5, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 4},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  16,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (6),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (42),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (52),			/* cost of FSQRT instruction.  */

  /* BDVER2 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do non-temporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  6,					/* scalar_stmt_cost.  */
  4,					/* scalar load_cost.  */
  4,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  4,					/* vec_align_load_cost.  */
  4,					/* vec_unalign_load_cost.  */
  4,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs btver1_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (5)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/* HI */
   COSTS_N_INSNS (51),			/* SI */
   COSTS_N_INSNS (83),			/* DI */
   COSTS_N_INSNS (83)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
					/* On K8:
					    MOVD reg64, xmmreg Double FSTORE 4
					    MOVD reg32, xmmreg Double FSTORE 4
					   On AMDFAM10:
					    MOVD reg64, xmmreg Double FADD 3
							       1/1  1/1
					    MOVD reg32, xmmreg Double FADD 3
							       1/1  1/1 */
  32,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* BTVER1 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use loop.  For large blocks, libcall
     can do non-temporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (3),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (4),			/* constant shift costs */
  {COSTS_N_INSNS (15),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),			/* HI */
   COSTS_N_INSNS (15),			/* SI */
   COSTS_N_INSNS (15),			/* DI */
   COSTS_N_INSNS (15)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),			/* HI */
   COSTS_N_INSNS (56),			/* SI */
   COSTS_N_INSNS (56),			/* DI */
   COSTS_N_INSNS (56)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  6,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  12,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  10,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (5),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
   {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (10),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),			/* HI */
   COSTS_N_INSNS (10),			/* SI */
   COSTS_N_INSNS (10),			/* DI */
   COSTS_N_INSNS (10)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),			/* HI */
   COSTS_N_INSNS (66),			/* SI */
   COSTS_N_INSNS (66),			/* DI */
   COSTS_N_INSNS (66)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  3,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  6,					/* cost of moving MMX register */
  {12, 12},				/* cost of loading MMX registers
					   in SImode and DImode */
  {12, 12},				/* cost of storing MMX registers
					   in SImode and DImode */
  6,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {12, 12, 12},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  8,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  1024,					/* size of l2 cache.  */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
   {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs atom_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/* HI */
   COSTS_N_INSNS (3),			/* SI */
   COSTS_N_INSNS (4),			/* DI */
   COSTS_N_INSNS (2)},			/* other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/* HI */
   COSTS_N_INSNS (42),			/* SI */
   COSTS_N_INSNS (74),			/* DI */
   COSTS_N_INSNS (74)},			/* other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};
1727 /* Generic64 should produce code tuned for Nocona and K8. */
1729 struct processor_costs generic64_cost = {
1730 COSTS_N_INSNS (1), /* cost of an add instruction */
1731 /* On all chips taken into consideration, lea is 2 cycles or more. With
1732 this cost, however, our current implementation of synth_mult results in
1733 the use of unnecessary temporary registers, causing a regression on several
1734 SPECfp benchmarks. */
1735 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1736 COSTS_N_INSNS (1), /* variable shift costs */
1737 COSTS_N_INSNS (1), /* constant shift costs */
1738 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1739 COSTS_N_INSNS (4), /* HI */
1740 COSTS_N_INSNS (3), /* SI */
1741 COSTS_N_INSNS (4), /* DI */
1742 COSTS_N_INSNS (2)}, /* other */
1743 0, /* cost of multiply per each bit set */
1744 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1745 COSTS_N_INSNS (26), /* HI */
1746 COSTS_N_INSNS (42), /* SI */
1747 COSTS_N_INSNS (74), /* DI */
1748 COSTS_N_INSNS (74)}, /* other */
1749 COSTS_N_INSNS (1), /* cost of movsx */
1750 COSTS_N_INSNS (1), /* cost of movzx */
1751 8, /* "large" insn */
1752 17, /* MOVE_RATIO */
1753 4, /* cost for loading QImode using movzbl */
1754 {4, 4, 4}, /* cost of loading integer registers
1755 in QImode, HImode and SImode.
1756 Relative to reg-reg move (2). */
1757 {4, 4, 4}, /* cost of storing integer registers */
1758 4, /* cost of reg,reg fld/fst */
1759 {12, 12, 12}, /* cost of loading fp registers
1760 in SFmode, DFmode and XFmode */
1761 {6, 6, 8}, /* cost of storing fp registers
1762 in SFmode, DFmode and XFmode */
1763 2, /* cost of moving MMX register */
1764 {8, 8}, /* cost of loading MMX registers
1765 in SImode and DImode */
1766 {8, 8}, /* cost of storing MMX registers
1767 in SImode and DImode */
1768 2, /* cost of moving SSE register */
1769 {8, 8, 8}, /* cost of loading SSE registers
1770 in SImode, DImode and TImode */
1771 {8, 8, 8}, /* cost of storing SSE registers
1772 in SImode, DImode and TImode */
1773 5, /* MMX or SSE register to integer */
1774 32, /* size of l1 cache. */
1775 512, /* size of l2 cache. */
1776 64, /* size of prefetch block */
1777 6, /* number of parallel prefetches */
1778 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
1779 value is increased to the perhaps more appropriate value of 5. */
1780 3, /* Branch cost */
1781 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1782 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1783 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1784 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1785 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1786 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1787 {DUMMY_STRINGOP_ALGS,
1788 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1789 {DUMMY_STRINGOP_ALGS,
1790 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1791 1, /* scalar_stmt_cost. */
1792 1, /* scalar load_cost. */
1793 1, /* scalar_store_cost. */
1794 1, /* vec_stmt_cost. */
1795 1, /* vec_to_scalar_cost. */
1796 1, /* scalar_to_vec_cost. */
1797 1, /* vec_align_load_cost. */
1798 2, /* vec_unalign_load_cost. */
1799 1, /* vec_store_cost. */
1800 3, /* cond_taken_branch_cost. */
1801 1, /* cond_not_taken_branch_cost. */
1804 /* Generic32 should produce code tuned for PPro, Pentium4, Nocona, Core 2 and K8. */
1807 struct processor_costs generic32_cost = {
1808 COSTS_N_INSNS (1), /* cost of an add instruction */
1809 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1810 COSTS_N_INSNS (1), /* variable shift costs */
1811 COSTS_N_INSNS (1), /* constant shift costs */
1812 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1813 COSTS_N_INSNS (4), /* HI */
1814 COSTS_N_INSNS (3), /* SI */
1815 COSTS_N_INSNS (4), /* DI */
1816 COSTS_N_INSNS (2)}, /* other */
1817 0, /* cost of multiply per each bit set */
1818 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1819 COSTS_N_INSNS (26), /* HI */
1820 COSTS_N_INSNS (42), /* SI */
1821 COSTS_N_INSNS (74), /* DI */
1822 COSTS_N_INSNS (74)}, /* other */
1823 COSTS_N_INSNS (1), /* cost of movsx */
1824 COSTS_N_INSNS (1), /* cost of movzx */
1825 8, /* "large" insn */
1826 17, /* MOVE_RATIO */
1827 4, /* cost for loading QImode using movzbl */
1828 {4, 4, 4}, /* cost of loading integer registers
1829 in QImode, HImode and SImode.
1830 Relative to reg-reg move (2). */
1831 {4, 4, 4}, /* cost of storing integer registers */
1832 4, /* cost of reg,reg fld/fst */
1833 {12, 12, 12}, /* cost of loading fp registers
1834 in SFmode, DFmode and XFmode */
1835 {6, 6, 8}, /* cost of storing fp registers
1836 in SFmode, DFmode and XFmode */
1837 2, /* cost of moving MMX register */
1838 {8, 8}, /* cost of loading MMX registers
1839 in SImode and DImode */
1840 {8, 8}, /* cost of storing MMX registers
1841 in SImode and DImode */
1842 2, /* cost of moving SSE register */
1843 {8, 8, 8}, /* cost of loading SSE registers
1844 in SImode, DImode and TImode */
1845 {8, 8, 8}, /* cost of storing SSE registers
1846 in SImode, DImode and TImode */
1847 5, /* MMX or SSE register to integer */
1848 32, /* size of l1 cache. */
1849 256, /* size of l2 cache. */
1850 64, /* size of prefetch block */
1851 6, /* number of parallel prefetches */
1852 3, /* Branch cost */
1853 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1854 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1855 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1856 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1857 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1858 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1859 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1860 DUMMY_STRINGOP_ALGS},
1861 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1862 DUMMY_STRINGOP_ALGS},
1863 1, /* scalar_stmt_cost. */
1864 1, /* scalar load_cost. */
1865 1, /* scalar_store_cost. */
1866 1, /* vec_stmt_cost. */
1867 1, /* vec_to_scalar_cost. */
1868 1, /* scalar_to_vec_cost. */
1869 1, /* vec_align_load_cost. */
1870 2, /* vec_unalign_load_cost. */
1871 1, /* vec_store_cost. */
1872 3, /* cond_taken_branch_cost. */
1873 1, /* cond_not_taken_branch_cost. */
1876 const struct processor_costs *ix86_cost = &pentium_cost;
1878 /* Processor feature/optimization bitmasks. */
1879 #define m_386 (1<<PROCESSOR_I386)
1880 #define m_486 (1<<PROCESSOR_I486)
1881 #define m_PENT (1<<PROCESSOR_PENTIUM)
1882 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1883 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1884 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1885 #define m_P4_NOCONA (m_PENT4 | m_NOCONA)
1886 #define m_CORE2_32 (1<<PROCESSOR_CORE2_32)
1887 #define m_CORE2_64 (1<<PROCESSOR_CORE2_64)
1888 #define m_COREI7_32 (1<<PROCESSOR_COREI7_32)
1889 #define m_COREI7_64 (1<<PROCESSOR_COREI7_64)
1890 #define m_COREI7 (m_COREI7_32 | m_COREI7_64)
1891 #define m_CORE2I7_32 (m_CORE2_32 | m_COREI7_32)
1892 #define m_CORE2I7_64 (m_CORE2_64 | m_COREI7_64)
1893 #define m_CORE2I7 (m_CORE2I7_32 | m_CORE2I7_64)
1894 #define m_ATOM (1<<PROCESSOR_ATOM)
1896 #define m_GEODE (1<<PROCESSOR_GEODE)
1897 #define m_K6 (1<<PROCESSOR_K6)
1898 #define m_K6_GEODE (m_K6 | m_GEODE)
1899 #define m_K8 (1<<PROCESSOR_K8)
1900 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1901 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1902 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1903 #define m_BDVER1 (1<<PROCESSOR_BDVER1)
1904 #define m_BDVER2 (1<<PROCESSOR_BDVER2)
1905 #define m_BDVER (m_BDVER1 | m_BDVER2)
1906 #define m_BTVER1 (1<<PROCESSOR_BTVER1)
1907 #define m_AMD_MULTIPLE (m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER1)
1909 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1910 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1912 /* Generic instruction choice should be the common subset of supported CPUs
1913 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1914 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
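/* Editorial note (added): these m_* masks are consumed by testing the
   bit of the active processor against per-feature masks; a minimal
   usage sketch, mirroring the initialization loops later in this file:

     unsigned int tune_mask = 1u << ix86_tune;
     bool use_bt
       = (initial_ix86_tune_features[X86_TUNE_USE_BT] & tune_mask) != 0;
   */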
1916 /* Feature tests against the various tunings. */
1917 unsigned char ix86_tune_features[X86_TUNE_LAST];
1919 /* Feature tests against the various tunings used to create ix86_tune_features
1920 based on the processor mask. */
1921 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1922 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1923 negatively, so enabling it for Generic64 seems like a good code-size
1924 tradeoff. We can't enable it for 32-bit generic because it does not
1925 work well with PPro-based chips. */
1926 m_386 | m_CORE2I7_64 | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC64,
1928 /* X86_TUNE_PUSH_MEMORY */
1929 m_386 | m_P4_NOCONA | m_CORE2I7 | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
1931 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1934 /* X86_TUNE_UNROLL_STRLEN */
1935 m_486 | m_PENT | m_PPRO | m_ATOM | m_CORE2I7 | m_K6 | m_AMD_MULTIPLE | m_GENERIC,
1937 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1938 on simulation results, but after P4 was made no performance benefit
1939 was observed with branch hints; they also increase code size.
1940 As a result, icc never generates branch hints. */
1943 /* X86_TUNE_DOUBLE_WITH_ADD */
1946 /* X86_TUNE_USE_SAHF */
1947 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER1 | m_GENERIC,
1949 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1950 partial dependencies. */
1951 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC,
1953 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1954 register stalls on the Generic32 compilation setting as well. However,
1955 in the current implementation the partial register stalls are not eliminated
1956 very well - they can be introduced via subregs synthesized by combine
1957 and can happen in caller/callee saving sequences. Because this option
1958 pays back little on PPro-based chips and is in conflict with the partial-register
1959 dependencies used by Athlon/P4-based chips, it is better to leave it off
1960 for generic32 for now. */
1963 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1964 m_CORE2I7 | m_GENERIC,
1966 /* X86_TUNE_USE_HIMODE_FIOP */
1967 m_386 | m_486 | m_K6_GEODE,
1969 /* X86_TUNE_USE_SIMODE_FIOP */
1970 ~(m_PENT | m_PPRO | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC),
1972 /* X86_TUNE_USE_MOV0 */
1975 /* X86_TUNE_USE_CLTD */
1976 ~(m_PENT | m_CORE2I7 | m_ATOM | m_K6 | m_GENERIC),
1978 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1981 /* X86_TUNE_SPLIT_LONG_MOVES */
1984 /* X86_TUNE_READ_MODIFY_WRITE */
1987 /* X86_TUNE_READ_MODIFY */
1990 /* X86_TUNE_PROMOTE_QIMODE */
1991 m_386 | m_486 | m_PENT | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
1993 /* X86_TUNE_FAST_PREFIX */
1994 ~(m_386 | m_486 | m_PENT),
1996 /* X86_TUNE_SINGLE_STRINGOP */
1997 m_386 | m_P4_NOCONA,
1999 /* X86_TUNE_QIMODE_MATH */
2002 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
2003 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL, this option
2004 might be considered for Generic32 if our scheme for avoiding partial
2005 stalls were more effective. */
2008 /* X86_TUNE_PROMOTE_QI_REGS */
2011 /* X86_TUNE_PROMOTE_HI_REGS */
2014 /* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred
2015 over esp addition. */
2016 m_386 | m_486 | m_PENT | m_PPRO,
2018 /* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred
2019 over esp addition. */
2022 /* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
2023 over esp subtraction. */
2024 m_386 | m_486 | m_PENT | m_K6_GEODE,
2026 /* X86_TUNE_DOUBLE_PUSH: Enable if double push insn is preferred
2027 over esp subtraction. */
2028 m_PENT | m_K6_GEODE,
2030 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
2031 for DFmode copies */
2032 ~(m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC),
2034 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
2035 m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
2037 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
2038 conflict here between PPro/Pentium4-based chips that treat 128bit
2039 SSE registers as single units and K8-based chips that divide SSE
2040 registers into two 64bit halves. This knob promotes all store destinations
2041 to be 128bit to allow register renaming on 128bit SSE units, but usually
2042 results in one extra microop on 64bit SSE units. Experimental results
2043 show that disabling this option on P4 brings over a 20% SPECfp regression,
2044 while enabling it on K8 brings roughly a 2.4% regression that can be partly
2045 masked by careful scheduling of moves. */
2046 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMDFAM10 | m_BDVER | m_GENERIC,
2048 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
2049 m_COREI7 | m_AMDFAM10 | m_BDVER | m_BTVER1,
2051 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
2054 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
2057 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
2058 are resolved on SSE register parts instead of whole registers, so we may
2059 maintain just the lower part of scalar values in the proper format, leaving the
2060 upper part undefined. */
2063 /* X86_TUNE_SSE_TYPELESS_STORES */
2066 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
2067 m_PPRO | m_P4_NOCONA,
2069 /* X86_TUNE_MEMORY_MISMATCH_STALL */
2070 m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
2072 /* X86_TUNE_PROLOGUE_USING_MOVE */
2073 m_PPRO | m_CORE2I7 | m_ATOM | m_ATHLON_K8 | m_GENERIC,
2075 /* X86_TUNE_EPILOGUE_USING_MOVE */
2076 m_PPRO | m_CORE2I7 | m_ATOM | m_ATHLON_K8 | m_GENERIC,
2078 /* X86_TUNE_SHIFT1 */
2081 /* X86_TUNE_USE_FFREEP */
2084 /* X86_TUNE_INTER_UNIT_MOVES */
2085 ~(m_AMD_MULTIPLE | m_GENERIC),
2087 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
2088 ~(m_AMDFAM10 | m_BDVER),
2090 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
2091 than 4 branch instructions in a 16-byte window. */
2092 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
2094 /* X86_TUNE_SCHEDULE */
2095 m_PENT | m_PPRO | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
2097 /* X86_TUNE_USE_BT */
2098 m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
2100 /* X86_TUNE_USE_INCDEC */
2101 ~(m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GENERIC),
2103 /* X86_TUNE_PAD_RETURNS */
2104 m_CORE2I7 | m_AMD_MULTIPLE | m_GENERIC,
2106 /* X86_TUNE_PAD_SHORT_FUNCTION: Pad short functions. */
2109 /* X86_TUNE_EXT_80387_CONSTANTS */
2110 m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_ATHLON_K8 | m_GENERIC,
2112 /* X86_TUNE_SHORTEN_X87_SSE */
2115 /* X86_TUNE_AVOID_VECTOR_DECODE */
2116 m_CORE2I7_64 | m_K8 | m_GENERIC64,
2118 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
2119 and SImode multiply, but the 386 and 486 do HImode multiply faster. */
2122 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
2123 vector path on AMD machines. */
2124 m_CORE2I7_64 | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER1 | m_GENERIC64,
2126 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD machines. */
2128 m_CORE2I7_64 | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER1 | m_GENERIC64,
2130 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR than a MOV. */
2134 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
2135 but one byte longer. */
2138 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
2139 operand that cannot be represented using a modRM byte. The XOR
2140 replacement is long decoded, so this split helps here as well. */
2143 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion from FP to FP. */
2145 m_CORE2I7 | m_AMDFAM10 | m_GENERIC,
2147 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
2148 from integer to FP. */
2151 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
2152 with a subsequent conditional jump instruction into a single
2153 compare-and-branch uop. */
2156 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
2157 will impact LEA instruction selection. */
2160 /* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector instructions. */
2164 /* X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL: Enable software prefetching
2165 at -O3. For the moment, the prefetching seems badly tuned for Intel chips. */
2167 m_K6_GEODE | m_AMD_MULTIPLE,
2169 /* X86_TUNE_AVX128_OPTIMAL: Enable 128-bit AVX instruction generation for
2170 the auto-vectorizer. */
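/* Editorial note (added): once ix86_tune_features[] has been filled in
   from the masks above, individual entries are normally read through
   wrapper macros in i386.h; a sketch of the usual pattern (assumed, not
   quoted from this tree):

     #define TARGET_USE_LEAVE ix86_tune_features[X86_TUNE_USE_LEAVE]
   */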
2174 /* Feature tests against the various architecture variations. */
2175 unsigned char ix86_arch_features[X86_ARCH_LAST];
2177 /* Feature tests against the various architecture variations, used to create
2178 ix86_arch_features based on the processor mask. */
2179 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
2180 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
2181 ~(m_386 | m_486 | m_PENT | m_K6),
2183 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
2186 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
2189 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
2192 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
2196 static const unsigned int x86_accumulate_outgoing_args
2197 = m_PPRO | m_P4_NOCONA | m_ATOM | m_CORE2I7 | m_AMD_MULTIPLE | m_GENERIC;
2199 static const unsigned int x86_arch_always_fancy_math_387
2200 = m_PENT | m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC;
2202 static const unsigned int x86_avx256_split_unaligned_load
2203 = m_COREI7 | m_GENERIC;
2205 static const unsigned int x86_avx256_split_unaligned_store
2206 = m_COREI7 | m_BDVER | m_GENERIC;
2208 /* In case the average insn count for a single function invocation is
2209 lower than this constant, emit fast (but longer) prologue and epilogue code. */
2211 #define FAST_PROLOGUE_INSN_COUNT 20
2213 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
2214 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
2215 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
2216 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
2218 /* Array of the smallest class containing reg number REGNO, indexed by
2219 REGNO. Used by REGNO_REG_CLASS in i386.h. */
2221 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
2223 /* ax, dx, cx, bx */
2224 AREG, DREG, CREG, BREG,
2225 /* si, di, bp, sp */
2226 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
2228 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
2229 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
2232 /* flags, fpsr, fpcr, frame */
2233 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
2235 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2238 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
2241 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2242 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
2243 /* SSE REX registers */
2244 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
2248 /* The "default" register map used in 32bit mode. */
2250 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
2252 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
2253 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
2254 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2255 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
2256 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
2257 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2258 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
2261 /* The "default" register map used in 64bit mode. */
2263 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
2265 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
2266 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
2267 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2268 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
2269 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
2270 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
2271 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
2274 /* Define the register numbers to be used in Dwarf debugging information.
2275 The SVR4 reference port C compiler uses the following register numbers
2276 in its Dwarf output code:
2277 0 for %eax (gcc regno = 0)
2278 1 for %ecx (gcc regno = 2)
2279 2 for %edx (gcc regno = 1)
2280 3 for %ebx (gcc regno = 3)
2281 4 for %esp (gcc regno = 7)
2282 5 for %ebp (gcc regno = 6)
2283 6 for %esi (gcc regno = 4)
2284 7 for %edi (gcc regno = 5)
2285 The following three DWARF register numbers are never generated by
2286 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
2287 believes these numbers have these meanings.
2288 8 for %eip (no gcc equivalent)
2289 9 for %eflags (gcc regno = 17)
2290 10 for %trapno (no gcc equivalent)
2291 It is not at all clear how we should number the FP stack registers
2292 for the x86 architecture. If the version of SDB on x86/svr4 were
2293 a bit less brain dead with respect to floating-point then we would
2294 have a precedent to follow with respect to DWARF register numbers
2295 for x86 FP registers, but the SDB on x86/svr4 is so completely
2296 broken with respect to FP registers that it is hardly worth thinking
2297 of it as something to strive for compatibility with.
2298 The version of x86/svr4 SDB I have at the moment does (partially)
2299 seem to believe that DWARF register number 11 is associated with
2300 the x86 register %st(0), but that's about all. Higher DWARF
2301 register numbers don't seem to be associated with anything in
2302 particular, and even for DWARF regno 11, SDB only seems to under-
2303 stand that it should say that a variable lives in %st(0) (when
2304 asked via an `=' command) if we said it was in DWARF regno 11,
2305 but SDB still prints garbage when asked for the value of the
2306 variable in question (via a `/' command).
2307 (Also note that the labels SDB prints for various FP stack regs
2308 when doing an `x' command are all wrong.)
2309 Note that these problems generally don't affect the native SVR4
2310 C compiler because it doesn't allow the use of -O with -g and
2311 because when it is *not* optimizing, it allocates a memory
2312 location for each floating-point variable, and the memory
2313 location is what gets described in the DWARF AT_location
2314 attribute for the variable in question.
2315 Regardless of the severe mental illness of the x86/svr4 SDB, we
2316 do something sensible here and we use the following DWARF
2317 register numbers. Note that these are all stack-top-relative numbers:
2319 11 for %st(0) (gcc regno = 8)
2320 12 for %st(1) (gcc regno = 9)
2321 13 for %st(2) (gcc regno = 10)
2322 14 for %st(3) (gcc regno = 11)
2323 15 for %st(4) (gcc regno = 12)
2324 16 for %st(5) (gcc regno = 13)
2325 17 for %st(6) (gcc regno = 14)
2326 18 for %st(7) (gcc regno = 15)
2328 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
2330 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
2331 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
2332 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
2333 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
2334 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
2335 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
2336 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
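/* Editorial note (added): these register maps are selected per target
   by DBX_REGISTER_NUMBER; a sketch of the usual i386.h definition
   (assumed, not quoted from this tree - svr4-style configurations
   substitute svr4_dbx_register_map instead):

     #define DBX_REGISTER_NUMBER(N) \
       (TARGET_64BIT ? dbx64_register_map[(N)] : dbx_register_map[(N)])
   */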
2339 /* Define parameter passing and return registers. */
2341 static int const x86_64_int_parameter_registers[6] =
2343 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
2346 static int const x86_64_ms_abi_int_parameter_registers[4] =
2348 CX_REG, DX_REG, R8_REG, R9_REG
2351 static int const x86_64_int_return_registers[4] =
2353 AX_REG, DX_REG, DI_REG, SI_REG
2356 /* Define the structure for the machine field in struct function. */
2358 struct GTY(()) stack_local_entry {
2359 unsigned short mode;
2362 struct stack_local_entry *next;
2365 /* Structure describing stack frame layout.
2366 Stack grows downward:
2372 saved static chain if ix86_static_chain_on_stack
2374 saved frame pointer if frame_pointer_needed
2375 <- HARD_FRAME_POINTER
2381 <- sse_regs_save_offset
2384 [va_arg registers] |
2388 [padding2] | = to_allocate
2397 int outgoing_arguments_size;
2398 HOST_WIDE_INT frame;
2400 /* The offsets relative to ARG_POINTER. */
2401 HOST_WIDE_INT frame_pointer_offset;
2402 HOST_WIDE_INT hard_frame_pointer_offset;
2403 HOST_WIDE_INT stack_pointer_offset;
2404 HOST_WIDE_INT hfp_save_offset;
2405 HOST_WIDE_INT reg_save_offset;
2406 HOST_WIDE_INT sse_reg_save_offset;
2408 /* When save_regs_using_mov is set, emit prologue using
2409 move instead of push instructions. */
2410 bool save_regs_using_mov;
2413 /* Which CPU we are scheduling for. */
2414 enum attr_cpu ix86_schedule;
2416 /* Which CPU we are optimizing for. */
2417 enum processor_type ix86_tune;
2419 /* Which instruction set architecture to use. */
2420 enum processor_type ix86_arch;
2422 /* True if the SSE prefetch instruction is not a NOOP. */
2423 int x86_prefetch_sse;
2425 /* -mstackrealign option */
2426 static const char ix86_force_align_arg_pointer_string[]
2427 = "force_align_arg_pointer";
2429 static rtx (*ix86_gen_leave) (void);
2430 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
2431 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
2432 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
2433 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
2434 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
2435 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
2436 static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
2437 static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
2438 static rtx (*ix86_gen_probe_stack_range) (rtx, rtx, rtx);
2440 /* Preferred alignment for stack boundary in bits. */
2441 unsigned int ix86_preferred_stack_boundary;
2443 /* Alignment for incoming stack boundary in bits specified at command line. */
2445 static unsigned int ix86_user_incoming_stack_boundary;
2447 /* Default alignment for incoming stack boundary in bits. */
2448 static unsigned int ix86_default_incoming_stack_boundary;
2450 /* Alignment for incoming stack boundary in bits. */
2451 unsigned int ix86_incoming_stack_boundary;
2453 /* Calling-ABI-specific va_list type nodes. */
2454 static GTY(()) tree sysv_va_list_type_node;
2455 static GTY(()) tree ms_va_list_type_node;
2457 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
2458 char internal_label_prefix[16];
2459 int internal_label_prefix_len;
2461 /* Fence to use after loop using movnt. */
2464 /* Register class used for passing a given 64bit part of the argument.
2465 These represent classes as documented by the PS ABI, with the exception
2466 of the SSESF and SSEDF classes, which are basically the SSE class, except
2467 that gcc will use SF or DFmode moves instead of DImode to avoid reformatting penalties.
2469 Similarly, we play games with the INTEGERSI_CLASS to use cheaper SImode moves
2470 whenever possible (the upper half does contain padding). */
2471 enum x86_64_reg_class
2474 X86_64_INTEGER_CLASS,
2475 X86_64_INTEGERSI_CLASS,
2482 X86_64_COMPLEX_X87_CLASS,
2486 #define MAX_CLASSES 4
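/* Editorial note (added): an illustration of the classification these
   enums support, per the psABI rules (hedged; not code from this file).
   A 16-byte aggregate is split into two eightbytes, each classified
   independently by the classification helpers later in this file:

     struct s { double d; int i; };
       eightbyte 0 -> X86_64_SSEDF_CLASS     (the double; in %xmm0)
       eightbyte 1 -> X86_64_INTEGERSI_CLASS (the int; in %edi)
   */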
2488 /* Table of constants used by fldpi, fldln2, etc. */
2489 static REAL_VALUE_TYPE ext_80387_constants_table [5];
2490 static bool ext_80387_constants_init = 0;
2493 static struct machine_function * ix86_init_machine_status (void);
2494 static rtx ix86_function_value (const_tree, const_tree, bool);
2495 static bool ix86_function_value_regno_p (const unsigned int);
2496 static unsigned int ix86_function_arg_boundary (enum machine_mode,
2498 static rtx ix86_static_chain (const_tree, bool);
2499 static int ix86_function_regparm (const_tree, const_tree);
2500 static void ix86_compute_frame_layout (struct ix86_frame *);
2501 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
2503 static void ix86_add_new_builtins (int);
2504 static rtx ix86_expand_vec_perm_builtin (tree);
2505 static tree ix86_canonical_va_list_type (tree);
2506 static void predict_jump (int);
2507 static unsigned int split_stack_prologue_scratch_regno (void);
2508 static bool i386_asm_output_addr_const_extra (FILE *, rtx);
2510 enum ix86_function_specific_strings
2512 IX86_FUNCTION_SPECIFIC_ARCH,
2513 IX86_FUNCTION_SPECIFIC_TUNE,
2514 IX86_FUNCTION_SPECIFIC_MAX
2517 static char *ix86_target_string (int, int, const char *, const char *,
2518 enum fpmath_unit, bool);
2519 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
2520 static void ix86_function_specific_save (struct cl_target_option *);
2521 static void ix86_function_specific_restore (struct cl_target_option *);
2522 static void ix86_function_specific_print (FILE *, int,
2523 struct cl_target_option *);
2524 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2525 static bool ix86_valid_target_attribute_inner_p (tree, char *[],
2526 struct gcc_options *);
2527 static bool ix86_can_inline_p (tree, tree);
2528 static void ix86_set_current_function (tree);
2529 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2531 static enum calling_abi ix86_function_abi (const_tree);
2534 #ifndef SUBTARGET32_DEFAULT_CPU
2535 #define SUBTARGET32_DEFAULT_CPU "i386"
2538 /* The svr4 ABI for the i386 says that records and unions are returned in memory. */
2540 #ifndef DEFAULT_PCC_STRUCT_RETURN
2541 #define DEFAULT_PCC_STRUCT_RETURN 1
2544 /* Whether -mtune= or -march= were specified */
2545 static int ix86_tune_defaulted;
2546 static int ix86_arch_specified;
2548 /* Vectorization library interface and handlers. */
2549 static tree (*ix86_veclib_handler) (enum built_in_function, tree, tree);
2551 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2552 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2554 /* Processor target table, indexed by processor number */
2557 const struct processor_costs *cost; /* Processor costs */
2558 const int align_loop; /* Default alignments. */
2559 const int align_loop_max_skip;
2560 const int align_jump;
2561 const int align_jump_max_skip;
2562 const int align_func;
2565 static const struct ptt processor_target_table[PROCESSOR_max] =
2567 {&i386_cost, 4, 3, 4, 3, 4},
2568 {&i486_cost, 16, 15, 16, 15, 16},
2569 {&pentium_cost, 16, 7, 16, 7, 16},
2570 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2571 {&geode_cost, 0, 0, 0, 0, 0},
2572 {&k6_cost, 32, 7, 32, 7, 32},
2573 {&athlon_cost, 16, 7, 16, 7, 16},
2574 {&pentium4_cost, 0, 0, 0, 0, 0},
2575 {&k8_cost, 16, 7, 16, 7, 16},
2576 {&nocona_cost, 0, 0, 0, 0, 0},
2577 /* Core 2 32-bit. */
2578 {&generic32_cost, 16, 10, 16, 10, 16},
2579 /* Core 2 64-bit. */
2580 {&generic64_cost, 16, 10, 16, 10, 16},
2581 /* Core i7 32-bit. */
2582 {&generic32_cost, 16, 10, 16, 10, 16},
2583 /* Core i7 64-bit. */
2584 {&generic64_cost, 16, 10, 16, 10, 16},
2585 {&generic32_cost, 16, 7, 16, 7, 16},
2586 {&generic64_cost, 16, 10, 16, 10, 16},
2587 {&amdfam10_cost, 32, 24, 32, 7, 32},
2588 {&bdver1_cost, 32, 24, 32, 7, 32},
2589 {&bdver2_cost, 32, 24, 32, 7, 32},
2590 {&btver1_cost, 32, 24, 32, 7, 32},
2591 {&atom_cost, 16, 7, 16, 7, 16}
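/* Editorial note (added): the five integers in each row above are the
   default alignments - loop, loop max skip, jump, jump max skip, and
   function - that ix86_option_override_internal below copies into
   align_loops, align_jumps and align_functions when the user leaves
   them at zero.  */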
2594 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2624 /* Return true if a red-zone is in use. */
2627 ix86_using_red_zone (void)
2629 return TARGET_RED_ZONE && !TARGET_64BIT_MS_ABI;
2632 /* Return a string that documents the current -m options. The caller is
2633 responsible for freeing the string. */
2636 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2637 enum fpmath_unit fpmath, bool add_nl_p)
2639 struct ix86_target_opts
2641 const char *option; /* option string */
2642 int mask; /* isa mask options */
2645 /* This table is ordered so that options like -msse4.2 that imply
2646 preceding options will match first. */
2647 static struct ix86_target_opts isa_opts[] =
2649 { "-m64", OPTION_MASK_ISA_64BIT },
2650 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2651 { "-mfma", OPTION_MASK_ISA_FMA },
2652 { "-mxop", OPTION_MASK_ISA_XOP },
2653 { "-mlwp", OPTION_MASK_ISA_LWP },
2654 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2655 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2656 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2657 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2658 { "-msse3", OPTION_MASK_ISA_SSE3 },
2659 { "-msse2", OPTION_MASK_ISA_SSE2 },
2660 { "-msse", OPTION_MASK_ISA_SSE },
2661 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2662 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2663 { "-mmmx", OPTION_MASK_ISA_MMX },
2664 { "-mabm", OPTION_MASK_ISA_ABM },
2665 { "-mbmi", OPTION_MASK_ISA_BMI },
2666 { "-mlzcnt", OPTION_MASK_ISA_LZCNT },
2667 { "-mtbm", OPTION_MASK_ISA_TBM },
2668 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2669 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2670 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2671 { "-maes", OPTION_MASK_ISA_AES },
2672 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2673 { "-mfsgsbase", OPTION_MASK_ISA_FSGSBASE },
2674 { "-mrdrnd", OPTION_MASK_ISA_RDRND },
2675 { "-mf16c", OPTION_MASK_ISA_F16C },
2679 static struct ix86_target_opts flag_opts[] =
2681 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2682 { "-m80387", MASK_80387 },
2683 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2684 { "-malign-double", MASK_ALIGN_DOUBLE },
2685 { "-mcld", MASK_CLD },
2686 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2687 { "-mieee-fp", MASK_IEEE_FP },
2688 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2689 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2690 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2691 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2692 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2693 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2694 { "-mno-red-zone", MASK_NO_RED_ZONE },
2695 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2696 { "-mrecip", MASK_RECIP },
2697 { "-mrtd", MASK_RTD },
2698 { "-msseregparm", MASK_SSEREGPARM },
2699 { "-mstack-arg-probe", MASK_STACK_PROBE },
2700 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2701 { "-mvect8-ret-in-mem", MASK_VECT8_RETURNS },
2702 { "-m8bit-idiv", MASK_USE_8BIT_IDIV },
2703 { "-mvzeroupper", MASK_VZEROUPPER },
2704 { "-mavx256-split-unaligned-load", MASK_AVX256_SPLIT_UNALIGNED_LOAD},
2705 { "-mavx256-split-unaligned-store", MASK_AVX256_SPLIT_UNALIGNED_STORE},
2706 { "-mprefer-avx128", MASK_PREFER_AVX128},
2709 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2712 char target_other[40];
2721 memset (opts, '\0', sizeof (opts));
2723 /* Add -march= option. */
2726 opts[num][0] = "-march=";
2727 opts[num++][1] = arch;
2730 /* Add -mtune= option. */
2733 opts[num][0] = "-mtune=";
2734 opts[num++][1] = tune;
2737 /* Pick out the ISA options. */
2738 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2740 if ((isa & isa_opts[i].mask) != 0)
2742 opts[num++][0] = isa_opts[i].option;
2743 isa &= ~ isa_opts[i].mask;
2747 if (isa && add_nl_p)
2749 opts[num++][0] = isa_other;
2750 sprintf (isa_other, "(other isa: %#x)", isa);
2753 /* Add flag options. */
2754 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2756 if ((flags & flag_opts[i].mask) != 0)
2758 opts[num++][0] = flag_opts[i].option;
2759 flags &= ~ flag_opts[i].mask;
2763 if (flags && add_nl_p)
2765 opts[num++][0] = target_other;
2766 sprintf (target_other, "(other flags: %#x)", flags);
2769 /* Add -mfpmath= option. */
2772 opts[num][0] = "-mfpmath=";
2773 switch ((int) fpmath)
2776 opts[num++][1] = "387";
2780 opts[num++][1] = "sse";
2783 case FPMATH_387 | FPMATH_SSE:
2784 opts[num++][1] = "sse+387";
2796 gcc_assert (num < ARRAY_SIZE (opts));
2798 /* Size the string. */
2800 sep_len = (add_nl_p) ? 3 : 1;
2801 for (i = 0; i < num; i++)
2804 for (j = 0; j < 2; j++)
2806 len += strlen (opts[i][j]);
2809 /* Build the string. */
2810 ret = ptr = (char *) xmalloc (len);
2813 for (i = 0; i < num; i++)
2817 for (j = 0; j < 2; j++)
2818 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2825 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2833 for (j = 0; j < 2; j++)
2836 memcpy (ptr, opts[i][j], len2[j]);
2838 line_len += len2[j];
2843 gcc_assert (ret + len >= ptr);
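/* Editorial note (added): a hedged usage sketch of the routine above;
   the caller owns (and must free) the returned buffer, as
   ix86_debug_options below also demonstrates:

     char *opts = ix86_target_string (ix86_isa_flags, target_flags,
                                      "corei7", "generic",
                                      FPMATH_SSE, true);
     fprintf (stderr, "%s\n", opts);
     free (opts);
   */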
2848 /* Return true if profiling code should be emitted before
2849 the prologue, and false otherwise.
2850 Note: for x86 with "hotfix" it is sorried, i.e. not supported. */
2852 ix86_profile_before_prologue (void)
2854 return flag_fentry != 0;
2857 /* Function that is callable from the debugger to print the current options. */
2860 ix86_debug_options (void)
2862 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2863 ix86_arch_string, ix86_tune_string,
2868 fprintf (stderr, "%s\n\n", opts);
2872 fputs ("<no options>\n\n", stderr);
2877 /* Override various settings based on options. If MAIN_ARGS_P, the
2878 options are from the command line, otherwise they are from attribute(target). */
2882 ix86_option_override_internal (bool main_args_p)
2885 unsigned int ix86_arch_mask, ix86_tune_mask;
2886 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2897 PTA_PREFETCH_SSE = 1 << 4,
2899 PTA_3DNOW_A = 1 << 6,
2903 PTA_POPCNT = 1 << 10,
2905 PTA_SSE4A = 1 << 12,
2906 PTA_NO_SAHF = 1 << 13,
2907 PTA_SSE4_1 = 1 << 14,
2908 PTA_SSE4_2 = 1 << 15,
2910 PTA_PCLMUL = 1 << 17,
2913 PTA_MOVBE = 1 << 20,
2917 PTA_FSGSBASE = 1 << 24,
2918 PTA_RDRND = 1 << 25,
2923 /* if this reaches 32, need to widen struct pta flags below */
2928 const char *const name; /* processor name or nickname. */
2929 const enum processor_type processor;
2930 const enum attr_cpu schedule;
2931 const unsigned /*enum pta_flags*/ flags;
2933 const processor_alias_table[] =
2935 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2936 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2937 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2938 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2939 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2940 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2941 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2942 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2943 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2944 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2945 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2946 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2947 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2949 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2951 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2952 PTA_MMX | PTA_SSE | PTA_SSE2},
2953 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2954 PTA_MMX |PTA_SSE | PTA_SSE2},
2955 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2956 PTA_MMX | PTA_SSE | PTA_SSE2},
2957 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2958 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2959 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2960 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2961 | PTA_CX16 | PTA_NO_SAHF},
2962 {"core2", PROCESSOR_CORE2_64, CPU_CORE2,
2963 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2964 | PTA_SSSE3 | PTA_CX16},
2965 {"corei7", PROCESSOR_COREI7_64, CPU_COREI7,
2966 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2967 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16},
2968 {"corei7-avx", PROCESSOR_COREI7_64, CPU_COREI7,
2969 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2970 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX
2971 | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL},
2972 {"core-avx-i", PROCESSOR_COREI7_64, CPU_COREI7,
2973 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2974 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX
2975 | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL | PTA_FSGSBASE
2976 | PTA_RDRND | PTA_F16C},
2977 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2978 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2979 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2980 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2981 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2982 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2983 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2984 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2985 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2986 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2987 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2988 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2989 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2990 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2991 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2992 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2993 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2994 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2995 {"x86-64", PROCESSOR_K8, CPU_K8,
2996 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2997 {"k8", PROCESSOR_K8, CPU_K8,
2998 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2999 | PTA_SSE2 | PTA_NO_SAHF},
3000 {"k8-sse3", PROCESSOR_K8, CPU_K8,
3001 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3002 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3003 {"opteron", PROCESSOR_K8, CPU_K8,
3004 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3005 | PTA_SSE2 | PTA_NO_SAHF},
3006 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
3007 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3008 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3009 {"athlon64", PROCESSOR_K8, CPU_K8,
3010 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3011 | PTA_SSE2 | PTA_NO_SAHF},
3012 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
3013 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3014 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
3015 {"athlon-fx", PROCESSOR_K8, CPU_K8,
3016 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3017 | PTA_SSE2 | PTA_NO_SAHF},
3018 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3019 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3020 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3021 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
3022 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
3023 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
3024 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
3025 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3026 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
3027 | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_FMA4
3028 | PTA_XOP | PTA_LWP},
3029 {"bdver2", PROCESSOR_BDVER2, CPU_BDVER2,
3030 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3031 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
3032 | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX
3033 | PTA_XOP | PTA_LWP | PTA_BMI | PTA_TBM | PTA_F16C
3035 {"btver1", PROCESSOR_BTVER1, CPU_GENERIC64,
3036 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
3037 | PTA_SSSE3 | PTA_SSE4A |PTA_ABM | PTA_CX16},
3038 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
3039 0 /* flags are only used for -march switch. */ },
3040 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
3041 PTA_64BIT /* flags are only used for -march switch. */ },
3044 int const pta_size = ARRAY_SIZE (processor_alias_table);
3046 /* Set up prefix/suffix so the error messages refer to either the command
3047 line argument, or the attribute(target). */
3056 prefix = "option(\"";
3061 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3062 SUBTARGET_OVERRIDE_OPTIONS;
3065 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3066 SUBSUBTARGET_OVERRIDE_OPTIONS;
3070 ix86_isa_flags |= OPTION_MASK_ISA_64BIT;
3072 /* -fPIC is the default for x86_64. */
3073 if (TARGET_MACHO && TARGET_64BIT)
3076 /* Need to check -mtune=generic first. */
3077 if (ix86_tune_string)
3079 if (!strcmp (ix86_tune_string, "generic")
3080 || !strcmp (ix86_tune_string, "i686")
3081 /* As special support for cross compilers we read -mtune=native
3082 as -mtune=generic. With native compilers we won't see the
3083 -mtune=native, as it was changed by the driver. */
3084 || !strcmp (ix86_tune_string, "native"))
3087 ix86_tune_string = "generic64";
3089 ix86_tune_string = "generic32";
3091 /* If this call is for setting the option attribute, allow the
3092 generic32/generic64 that was previously set. */
3093 else if (!main_args_p
3094 && (!strcmp (ix86_tune_string, "generic32")
3095 || !strcmp (ix86_tune_string, "generic64")))
3097 else if (!strncmp (ix86_tune_string, "generic", 7))
3098 error ("bad value (%s) for %stune=%s %s",
3099 ix86_tune_string, prefix, suffix, sw);
3100 else if (!strcmp (ix86_tune_string, "x86-64"))
3101 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated; use "
3102 "%stune=k8%s or %stune=generic%s instead as appropriate",
3103 prefix, suffix, prefix, suffix, prefix, suffix);
3107 if (ix86_arch_string)
3108 ix86_tune_string = ix86_arch_string;
3109 if (!ix86_tune_string)
3111 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
3112 ix86_tune_defaulted = 1;
3115 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
3116 need to use a sensible tune option. */
3117 if (!strcmp (ix86_tune_string, "generic")
3118 || !strcmp (ix86_tune_string, "x86-64")
3119 || !strcmp (ix86_tune_string, "i686"))
3122 ix86_tune_string = "generic64";
3124 ix86_tune_string = "generic32";
3128 if (ix86_stringop_alg == rep_prefix_8_byte && !TARGET_64BIT)
3130 /* rep; movq isn't available in 32-bit code. */
3131 error ("-mstringop-strategy=rep_8byte not supported for 32-bit code");
3132 ix86_stringop_alg = no_stringop;
3135 if (!ix86_arch_string)
3136 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
3138 ix86_arch_specified = 1;
3140 if (!global_options_set.x_ix86_abi)
3141 ix86_abi = DEFAULT_ABI;
3143 if (global_options_set.x_ix86_cmodel)
3145 switch (ix86_cmodel)
3150 ix86_cmodel = CM_SMALL_PIC;
3152 error ("code model %qs not supported in the %s bit mode",
3159 ix86_cmodel = CM_MEDIUM_PIC;
3161 error ("code model %qs not supported in the %s bit mode",
3163 else if (TARGET_X32)
3164 error ("code model %qs not supported in x32 mode",
3171 ix86_cmodel = CM_LARGE_PIC;
3173 error ("code model %qs not supported in the %s bit mode",
3175 else if (TARGET_X32)
3176 error ("code model %qs not supported in x32 mode",
3182 error ("code model %s does not support PIC mode", "32");
3184 error ("code model %qs not supported in the %s bit mode",
3191 error ("code model %s does not support PIC mode", "kernel");
3192 ix86_cmodel = CM_32;
3195 error ("code model %qs not supported in the %s bit mode",
3205 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3206 use of rip-relative addressing. This eliminates fixups that
3207 would otherwise be needed if this object is to be placed in a
3208 DLL, and is essentially just as efficient as direct addressing. */
3209 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3210 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3211 else if (TARGET_64BIT)
3212 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3214 ix86_cmodel = CM_32;
3216 if (TARGET_MACHO && ix86_asm_dialect == ASM_INTEL)
3218 error ("-masm=intel not supported in this configuration");
3219 ix86_asm_dialect = ASM_ATT;
3221 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3222 sorry ("%i-bit mode not compiled in",
3223 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3225 for (i = 0; i < pta_size; i++)
3226 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3228 ix86_schedule = processor_alias_table[i].schedule;
3229 ix86_arch = processor_alias_table[i].processor;
3230 /* Default cpu tuning to the architecture. */
3231 ix86_tune = ix86_arch;
3233 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3234 error ("CPU you selected does not support x86-64 "
3237 if (processor_alias_table[i].flags & PTA_MMX
3238 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3239 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3240 if (processor_alias_table[i].flags & PTA_3DNOW
3241 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3242 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3243 if (processor_alias_table[i].flags & PTA_3DNOW_A
3244 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3245 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3246 if (processor_alias_table[i].flags & PTA_SSE
3247 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3248 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3249 if (processor_alias_table[i].flags & PTA_SSE2
3250 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3251 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3252 if (processor_alias_table[i].flags & PTA_SSE3
3253 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3254 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3255 if (processor_alias_table[i].flags & PTA_SSSE3
3256 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3257 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3258 if (processor_alias_table[i].flags & PTA_SSE4_1
3259 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3260 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3261 if (processor_alias_table[i].flags & PTA_SSE4_2
3262 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3263 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3264 if (processor_alias_table[i].flags & PTA_AVX
3265 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3266 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3267 if (processor_alias_table[i].flags & PTA_FMA
3268 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3269 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3270 if (processor_alias_table[i].flags & PTA_SSE4A
3271 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3272 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3273 if (processor_alias_table[i].flags & PTA_FMA4
3274 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3275 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3276 if (processor_alias_table[i].flags & PTA_XOP
3277 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3278 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3279 if (processor_alias_table[i].flags & PTA_LWP
3280 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3281 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3282 if (processor_alias_table[i].flags & PTA_ABM
3283 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3284 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3285 if (processor_alias_table[i].flags & PTA_BMI
3286 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI))
3287 ix86_isa_flags |= OPTION_MASK_ISA_BMI;
3288 if (processor_alias_table[i].flags & (PTA_LZCNT | PTA_ABM)
3289 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LZCNT))
3290 ix86_isa_flags |= OPTION_MASK_ISA_LZCNT;
3291 if (processor_alias_table[i].flags & PTA_TBM
3292 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_TBM))
3293 ix86_isa_flags |= OPTION_MASK_ISA_TBM;
3294 if (processor_alias_table[i].flags & PTA_CX16
3295 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3296 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3297 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3298 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3299 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3300 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3301 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3302 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3303 if (processor_alias_table[i].flags & PTA_MOVBE
3304 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3305 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3306 if (processor_alias_table[i].flags & PTA_AES
3307 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3308 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3309 if (processor_alias_table[i].flags & PTA_PCLMUL
3310 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3311 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3312 if (processor_alias_table[i].flags & PTA_FSGSBASE
3313 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FSGSBASE))
3314 ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE;
3315 if (processor_alias_table[i].flags & PTA_RDRND
3316 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_RDRND))
3317 ix86_isa_flags |= OPTION_MASK_ISA_RDRND;
3318 if (processor_alias_table[i].flags & PTA_F16C
3319 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_F16C))
3320 ix86_isa_flags |= OPTION_MASK_ISA_F16C;
3321 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3322 x86_prefetch_sse = true;
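/* Editorial note (added): every clause in the chain above follows one
   pattern - enable an ISA bit implied by the -march table entry unless
   the user set that bit explicitly.  A hypothetical macro equivalent
   (illustration only, not used by this file):

     #define PTA_ENABLE(PTA_BIT, ISA_MASK)              \
       if ((processor_alias_table[i].flags & (PTA_BIT)) \
           && !(ix86_isa_flags_explicit & (ISA_MASK)))  \
         ix86_isa_flags |= (ISA_MASK);
   */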
3327 if (!strcmp (ix86_arch_string, "generic"))
3328 error ("generic CPU can be used only for %stune=%s %s",
3329 prefix, suffix, sw);
3330 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3331 error ("bad value (%s) for %sarch=%s %s",
3332 ix86_arch_string, prefix, suffix, sw);
3334 ix86_arch_mask = 1u << ix86_arch;
3335 for (i = 0; i < X86_ARCH_LAST; ++i)
3336 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3338 for (i = 0; i < pta_size; i++)
3339 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3341 ix86_schedule = processor_alias_table[i].schedule;
3342 ix86_tune = processor_alias_table[i].processor;
3345 if (!(processor_alias_table[i].flags & PTA_64BIT))
3347 if (ix86_tune_defaulted)
3349 ix86_tune_string = "x86-64";
3350 for (i = 0; i < pta_size; i++)
3351 if (! strcmp (ix86_tune_string,
3352 processor_alias_table[i].name))
3354 ix86_schedule = processor_alias_table[i].schedule;
3355 ix86_tune = processor_alias_table[i].processor;
3358 error ("CPU you selected does not support x86-64 "
3364 /* Adjust tuning when compiling for 32-bit ABI. */
3367 case PROCESSOR_GENERIC64:
3368 ix86_tune = PROCESSOR_GENERIC32;
3369 ix86_schedule = CPU_PENTIUMPRO;
3372 case PROCESSOR_CORE2_64:
3373 ix86_tune = PROCESSOR_CORE2_32;
3376 case PROCESSOR_COREI7_64:
3377 ix86_tune = PROCESSOR_COREI7_32;
3384 /* Intel CPUs have always interpreted SSE prefetch instructions as
3385 NOPs; so, we can enable SSE prefetch instructions even when
3386 -mtune (rather than -march) points us to a processor that has them.
3387 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3388 higher processors. */
3390 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3391 x86_prefetch_sse = true;
3395 if (ix86_tune_specified && i == pta_size)
3396 error ("bad value (%s) for %stune=%s %s",
3397 ix86_tune_string, prefix, suffix, sw);
3399 ix86_tune_mask = 1u << ix86_tune;
3400 for (i = 0; i < X86_TUNE_LAST; ++i)
3401 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3403 #ifndef USE_IX86_FRAME_POINTER
3404 #define USE_IX86_FRAME_POINTER 0
3407 #ifndef USE_X86_64_FRAME_POINTER
3408 #define USE_X86_64_FRAME_POINTER 0
3411 /* Set the default values for switches whose default depends on TARGET_64BIT
3412 in case they weren't overwritten by command line options. */
3415 if (optimize > 1 && !global_options_set.x_flag_zee)
3417 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3418 flag_omit_frame_pointer = !USE_X86_64_FRAME_POINTER;
3419 if (flag_asynchronous_unwind_tables == 2)
3420 flag_unwind_tables = flag_asynchronous_unwind_tables = 1;
3421 if (flag_pcc_struct_return == 2)
3422 flag_pcc_struct_return = 0;
3426 if (optimize >= 1 && !global_options_set.x_flag_omit_frame_pointer)
3427 flag_omit_frame_pointer = !(USE_IX86_FRAME_POINTER || optimize_size);
3428 if (flag_asynchronous_unwind_tables == 2)
3429 flag_asynchronous_unwind_tables = !USE_IX86_FRAME_POINTER;
3430 if (flag_pcc_struct_return == 2)
3431 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
3435 ix86_cost = &ix86_size_cost;
3437 ix86_cost = processor_target_table[ix86_tune].cost;
3439 /* Arrange to set up i386_stack_locals for all functions. */
3440 init_machine_status = ix86_init_machine_status;
3442 /* Validate -mregparm= value. */
3443 if (global_options_set.x_ix86_regparm)
3446 warning (0, "-mregparm is ignored in 64-bit mode");
3447 if (ix86_regparm > REGPARM_MAX)
3449 error ("-mregparm=%d is not between 0 and %d",
3450 ix86_regparm, REGPARM_MAX);
3455 ix86_regparm = REGPARM_MAX;
3457 /* Default align_* from the processor table. */
3458 if (align_loops == 0)
3460 align_loops = processor_target_table[ix86_tune].align_loop;
3461 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3463 if (align_jumps == 0)
3465 align_jumps = processor_target_table[ix86_tune].align_jump;
3466 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3468 if (align_functions == 0)
3470 align_functions = processor_target_table[ix86_tune].align_func;
3473 /* Provide default for -mbranch-cost= value. */
3474 if (!global_options_set.x_ix86_branch_cost)
3475 ix86_branch_cost = ix86_cost->branch_cost;
3479 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3481 /* Enable by default the SSE and MMX builtins. Do allow the user to
3482 explicitly disable any of these. In particular, disabling SSE and
3483 MMX for kernel code is extremely useful. */
3484 if (!ix86_arch_specified)
3486 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3487 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3490 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3494 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3496 if (!ix86_arch_specified)
3498 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3500       /* The i386 ABI does not specify a red zone.  It still makes sense to use it
3501          when the programmer takes care to keep the stack from being destroyed.  */
3502 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3503 target_flags |= MASK_NO_RED_ZONE;
3506 /* Keep nonleaf frame pointers. */
3507 if (flag_omit_frame_pointer)
3508 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3509 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3510 flag_omit_frame_pointer = 1;
3512 /* If we're doing fast math, we don't care about comparison order
3513 wrt NaNs. This lets us use a shorter comparison sequence. */
3514 if (flag_finite_math_only)
3515 target_flags &= ~MASK_IEEE_FP;
3517 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3518 since the insns won't need emulation. */
3519 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3520 target_flags &= ~MASK_NO_FANCY_MATH_387;
3522 /* Likewise, if the target doesn't have a 387, or we've specified
3523 software floating point, don't use 387 inline intrinsics. */
3525 target_flags |= MASK_NO_FANCY_MATH_387;
3527 /* Turn on MMX builtins for -msse. */
3530 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3531 x86_prefetch_sse = true;
3534 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3535 if (TARGET_SSE4_2 || TARGET_ABM)
3536 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3538 /* Turn on lzcnt instruction for -mabm. */
3540 ix86_isa_flags |= OPTION_MASK_ISA_LZCNT & ~ix86_isa_flags_explicit;
3542 /* Validate -mpreferred-stack-boundary= value or default it to
3543 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3544 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3545 if (global_options_set.x_ix86_preferred_stack_boundary_arg)
3547 int min = (TARGET_64BIT ? 4 : 2);
3548 int max = (TARGET_SEH ? 4 : 12);
3550 if (ix86_preferred_stack_boundary_arg < min
3551 || ix86_preferred_stack_boundary_arg > max)
3554 error ("-mpreferred-stack-boundary is not supported "
3557 error ("-mpreferred-stack-boundary=%d is not between %d and %d",
3558 ix86_preferred_stack_boundary_arg, min, max);
3561 ix86_preferred_stack_boundary
3562 = (1 << ix86_preferred_stack_boundary_arg) * BITS_PER_UNIT;
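  /* Illustrative note, not in the original source: the option argument is
     the log2 of the boundary in bytes.  For example,
     -mpreferred-stack-boundary=4 yields (1 << 4) * BITS_PER_UNIT
     == 16 * 8 == 128 bits, i.e. the 16-byte alignment the x86-64 psABI
     requires.  */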
3565 /* Set the default value for -mstackrealign. */
3566 if (ix86_force_align_arg_pointer == -1)
3567 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3569 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3571 /* Validate -mincoming-stack-boundary= value or default it to
3572 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3573 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3574 if (global_options_set.x_ix86_incoming_stack_boundary_arg)
3576 if (ix86_incoming_stack_boundary_arg < (TARGET_64BIT ? 4 : 2)
3577 || ix86_incoming_stack_boundary_arg > 12)
3578 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3579 ix86_incoming_stack_boundary_arg, TARGET_64BIT ? 4 : 2);
3582 ix86_user_incoming_stack_boundary
3583 = (1 << ix86_incoming_stack_boundary_arg) * BITS_PER_UNIT;
3584 ix86_incoming_stack_boundary
3585 = ix86_user_incoming_stack_boundary;
3589 /* Accept -msseregparm only if at least SSE support is enabled. */
3590 if (TARGET_SSEREGPARM
3592 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3594 if (global_options_set.x_ix86_fpmath)
3596 if (ix86_fpmath & FPMATH_SSE)
3600 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3601 ix86_fpmath = FPMATH_387;
3603 else if ((ix86_fpmath & FPMATH_387) && !TARGET_80387)
3605 warning (0, "387 instruction set disabled, using SSE arithmetics");
3606 ix86_fpmath = FPMATH_SSE;
3611 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3613 /* If the i387 is disabled, then do not return values in it. */
3615 target_flags &= ~MASK_FLOAT_RETURNS;
3617 /* Use external vectorized library in vectorizing intrinsics. */
3618 if (global_options_set.x_ix86_veclibabi_type)
3619 switch (ix86_veclibabi_type)
3621 case ix86_veclibabi_type_svml:
3622 ix86_veclib_handler = ix86_veclibabi_svml;
3625 case ix86_veclibabi_type_acml:
3626 ix86_veclib_handler = ix86_veclibabi_acml;
3633 if ((!USE_IX86_FRAME_POINTER
3634 || (x86_accumulate_outgoing_args & ix86_tune_mask))
3635 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3637 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3639 /* ??? Unwind info is not correct around the CFG unless either a frame
3640 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3641 unwind info generation to be aware of the CFG and propagating states
3643 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3644 || flag_exceptions || flag_non_call_exceptions)
3645 && flag_omit_frame_pointer
3646 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3648 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3649 warning (0, "unwind tables currently require either a frame pointer "
3650 "or %saccumulate-outgoing-args%s for correctness",
3652 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3655 /* If stack probes are required, the space used for large function
3656 arguments on the stack must also be probed, so enable
3657 -maccumulate-outgoing-args so this happens in the prologue. */
3658 if (TARGET_STACK_PROBE
3659 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3661 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3662 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3663 "for correctness", prefix, suffix);
3664 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3667   /* For sane SSE instruction set generation we need the fcomi instruction.
3668      It is safe to enable all CMOVE instructions.  Also, the RDRAND intrinsic
3669 expands to a sequence that includes conditional move. */
3670 if (TARGET_SSE || TARGET_RDRND)
3673 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3676 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3677 p = strchr (internal_label_prefix, 'X');
3678 internal_label_prefix_len = p - internal_label_prefix;
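  /* Illustrative example, not in the original source: if the target
     generates internal labels of the form "*.LX0", strchr finds the 'X'
     placeholder and the computed prefix is "*.L" with length 3; only the
     text preceding the 'X' is kept.  */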
3682   /* When a scheduling description is not available, disable the scheduler pass
3683      so it won't slow down compilation and make x87 code slower.  */
3684 if (!TARGET_SCHEDULE)
3685 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3687 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3688 ix86_cost->simultaneous_prefetches,
3689 global_options.x_param_values,
3690 global_options_set.x_param_values);
3691 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, ix86_cost->prefetch_block,
3692 global_options.x_param_values,
3693 global_options_set.x_param_values);
3694 maybe_set_param_value (PARAM_L1_CACHE_SIZE, ix86_cost->l1_cache_size,
3695 global_options.x_param_values,
3696 global_options_set.x_param_values);
3697 maybe_set_param_value (PARAM_L2_CACHE_SIZE, ix86_cost->l2_cache_size,
3698 global_options.x_param_values,
3699 global_options_set.x_param_values);
3701   /* Enable software prefetching at -O3 for CPUs where prefetching is helpful.  */
3702 if (flag_prefetch_loop_arrays < 0
3705 && TARGET_SOFTWARE_PREFETCHING_BENEFICIAL)
3706 flag_prefetch_loop_arrays = 1;
3708 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3709 can be optimized to ap = __builtin_next_arg (0). */
3710 if (!TARGET_64BIT && !flag_split_stack)
3711 targetm.expand_builtin_va_start = NULL;
3715 ix86_gen_leave = gen_leave_rex64;
3716 ix86_gen_add3 = gen_adddi3;
3717 ix86_gen_sub3 = gen_subdi3;
3718 ix86_gen_sub3_carry = gen_subdi3_carry;
3719 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3720 ix86_gen_monitor = gen_sse3_monitor64;
3721 ix86_gen_andsp = gen_anddi3;
3722 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_di;
3723 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
3724 ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
3728 ix86_gen_leave = gen_leave;
3729 ix86_gen_add3 = gen_addsi3;
3730 ix86_gen_sub3 = gen_subsi3;
3731 ix86_gen_sub3_carry = gen_subsi3_carry;
3732 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3733 ix86_gen_monitor = gen_sse3_monitor;
3734 ix86_gen_andsp = gen_andsi3;
3735 ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_si;
3736 ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
3737 ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
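  /* Illustrative note, not in the original source: these ix86_gen_*
     function pointers let word-size-independent code emit the right
     pattern without testing TARGET_64BIT at each use.  A caller sketch:
       emit_insn (ix86_gen_add3 (stack_pointer_rtx, stack_pointer_rtx, off));
     expands through adddi3 on 64-bit targets and addsi3 on 32-bit ones.  */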
3741 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3743 target_flags |= MASK_CLD & ~target_flags_explicit;
3746 if (!TARGET_64BIT && flag_pic)
3748 if (flag_fentry > 0)
3749 sorry ("-mfentry isn%'t supported for 32-bit in combination "
3753 else if (TARGET_SEH)
3755 if (flag_fentry == 0)
3756 sorry ("-mno-fentry isn%'t compatible with SEH");
3759 else if (flag_fentry < 0)
3761 #if defined(PROFILE_BEFORE_PROLOGUE)
3770   /* When not optimizing for size, enable the vzeroupper optimization for
3771 TARGET_AVX with -fexpensive-optimizations and split 32-byte
3772 AVX unaligned load/store. */
3775 if (flag_expensive_optimizations
3776 && !(target_flags_explicit & MASK_VZEROUPPER))
3777 target_flags |= MASK_VZEROUPPER;
3778 if ((x86_avx256_split_unaligned_load & ix86_tune_mask)
3779 && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
3780 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
3781 if ((x86_avx256_split_unaligned_store & ix86_tune_mask)
3782 && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
3783 target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
3784 /* Enable 128-bit AVX instruction generation for the auto-vectorizer. */
3785 if (TARGET_AVX128_OPTIMAL && !(target_flags_explicit & MASK_PREFER_AVX128))
3786 target_flags |= MASK_PREFER_AVX128;
3791 /* Disable vzeroupper pass if TARGET_AVX is disabled. */
3792 target_flags &= ~MASK_VZEROUPPER;
3795 /* Save the initial options in case the user does function specific
3798 target_option_default_node = target_option_current_node
3799 = build_target_option_node ();
3802 /* Return TRUE if VAL is passed in a register with 256bit AVX modes.  */
3805 function_pass_avx256_p (const_rtx val)
3810 if (REG_P (val) && VALID_AVX256_REG_MODE (GET_MODE (val)))
3813 if (GET_CODE (val) == PARALLEL)
3818 for (i = XVECLEN (val, 0) - 1; i >= 0; i--)
3820 r = XVECEXP (val, 0, i);
3821 if (GET_CODE (r) == EXPR_LIST
3823 && REG_P (XEXP (r, 0))
3824 && (GET_MODE (XEXP (r, 0)) == OImode
3825 || VALID_AVX256_REG_MODE (GET_MODE (XEXP (r, 0)))))
3833 /* Implement the TARGET_OPTION_OVERRIDE hook. */
3836 ix86_option_override (void)
3838 ix86_option_override_internal (true);
3841 /* Update register usage after having seen the compiler flags. */
3844 ix86_conditional_register_usage (void)
3849 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3851 if (fixed_regs[i] > 1)
3852 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3853 if (call_used_regs[i] > 1)
3854 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3857 /* The PIC register, if it exists, is fixed. */
3858 j = PIC_OFFSET_TABLE_REGNUM;
3859 if (j != INVALID_REGNUM)
3860 fixed_regs[j] = call_used_regs[j] = 1;
3862 /* The 64-bit MS_ABI changes the set of call-used registers. */
3863 if (TARGET_64BIT_MS_ABI)
3865 call_used_regs[SI_REG] = 0;
3866 call_used_regs[DI_REG] = 0;
3867 call_used_regs[XMM6_REG] = 0;
3868 call_used_regs[XMM7_REG] = 0;
3869 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3870 call_used_regs[i] = 0;
3873 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3874 other call-clobbered regs for 64-bit. */
3877 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3879 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3880 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3881 && call_used_regs[i])
3882 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3885 /* If MMX is disabled, squash the registers. */
3887 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3888 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3889 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3891 /* If SSE is disabled, squash the registers. */
3893 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3894 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3895 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3897 /* If the FPU is disabled, squash the registers. */
3898 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3899 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3900 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3901 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3903 /* If 32-bit, squash the 64-bit registers. */
3906 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3908 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3914 /* Save the current options */
3917 ix86_function_specific_save (struct cl_target_option *ptr)
3919 ptr->arch = ix86_arch;
3920 ptr->schedule = ix86_schedule;
3921 ptr->tune = ix86_tune;
3922 ptr->branch_cost = ix86_branch_cost;
3923 ptr->tune_defaulted = ix86_tune_defaulted;
3924 ptr->arch_specified = ix86_arch_specified;
3925 ptr->x_ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3926 ptr->ix86_target_flags_explicit = target_flags_explicit;
3928 /* The fields are char but the variables are not; make sure the
3929 values fit in the fields. */
3930 gcc_assert (ptr->arch == ix86_arch);
3931 gcc_assert (ptr->schedule == ix86_schedule);
3932 gcc_assert (ptr->tune == ix86_tune);
3933 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3936 /* Restore the current options */
3939 ix86_function_specific_restore (struct cl_target_option *ptr)
3941 enum processor_type old_tune = ix86_tune;
3942 enum processor_type old_arch = ix86_arch;
3943 unsigned int ix86_arch_mask, ix86_tune_mask;
3946 ix86_arch = (enum processor_type) ptr->arch;
3947 ix86_schedule = (enum attr_cpu) ptr->schedule;
3948 ix86_tune = (enum processor_type) ptr->tune;
3949 ix86_branch_cost = ptr->branch_cost;
3950 ix86_tune_defaulted = ptr->tune_defaulted;
3951 ix86_arch_specified = ptr->arch_specified;
3952 ix86_isa_flags_explicit = ptr->x_ix86_isa_flags_explicit;
3953 target_flags_explicit = ptr->ix86_target_flags_explicit;
3955 /* Recreate the arch feature tests if the arch changed */
3956 if (old_arch != ix86_arch)
3958 ix86_arch_mask = 1u << ix86_arch;
3959 for (i = 0; i < X86_ARCH_LAST; ++i)
3960 ix86_arch_features[i]
3961 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3964 /* Recreate the tune optimization tests */
3965 if (old_tune != ix86_tune)
3967 ix86_tune_mask = 1u << ix86_tune;
3968 for (i = 0; i < X86_TUNE_LAST; ++i)
3969 ix86_tune_features[i]
3970 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3974 /* Print the current options */
3977 ix86_function_specific_print (FILE *file, int indent,
3978 struct cl_target_option *ptr)
3981 = ix86_target_string (ptr->x_ix86_isa_flags, ptr->x_target_flags,
3982 NULL, NULL, ptr->x_ix86_fpmath, false);
3984 fprintf (file, "%*sarch = %d (%s)\n",
3987 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3988 ? cpu_names[ptr->arch]
3991 fprintf (file, "%*stune = %d (%s)\n",
3994 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3995 ? cpu_names[ptr->tune]
3998 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
4002 fprintf (file, "%*s%s\n", indent, "", target_string);
4003 free (target_string);
4008 /* Inner function to process the attribute((target(...))); it takes an argument
4009    and sets the current options from it.  If we have a list, we recursively go
4013 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[],
4014 struct gcc_options *enum_opts_set)
4019 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
4020 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
4021 #define IX86_ATTR_ENUM(S,O) { S, sizeof (S)-1, ix86_opt_enum, O, 0 }
4022 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
4023 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
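/* Illustrative expansion, not in the original source:
     IX86_ATTR_ISA ("avx", OPT_mavx)
   becomes { "avx", 3, ix86_opt_isa, OPT_mavx, 0 }, i.e. the attribute
   string, its precomputed length, the handler kind, the command-line
   option it maps to, and a target-flag mask (unused for ISA options).  */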
4039 enum ix86_opt_type type;
4044 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
4045 IX86_ATTR_ISA ("abm", OPT_mabm),
4046 IX86_ATTR_ISA ("bmi", OPT_mbmi),
4047 IX86_ATTR_ISA ("lzcnt", OPT_mlzcnt),
4048 IX86_ATTR_ISA ("tbm", OPT_mtbm),
4049 IX86_ATTR_ISA ("aes", OPT_maes),
4050 IX86_ATTR_ISA ("avx", OPT_mavx),
4051 IX86_ATTR_ISA ("mmx", OPT_mmmx),
4052 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
4053 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
4054 IX86_ATTR_ISA ("sse", OPT_msse),
4055 IX86_ATTR_ISA ("sse2", OPT_msse2),
4056 IX86_ATTR_ISA ("sse3", OPT_msse3),
4057 IX86_ATTR_ISA ("sse4", OPT_msse4),
4058 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
4059 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
4060 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
4061 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
4062 IX86_ATTR_ISA ("fma4", OPT_mfma4),
4063 IX86_ATTR_ISA ("xop", OPT_mxop),
4064 IX86_ATTR_ISA ("lwp", OPT_mlwp),
4065 IX86_ATTR_ISA ("fsgsbase", OPT_mfsgsbase),
4066 IX86_ATTR_ISA ("rdrnd", OPT_mrdrnd),
4067 IX86_ATTR_ISA ("f16c", OPT_mf16c),
4070 IX86_ATTR_ENUM ("fpmath=", OPT_mfpmath_),
4072 /* string options */
4073 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
4074 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
4077 IX86_ATTR_YES ("cld",
4081 IX86_ATTR_NO ("fancy-math-387",
4082 OPT_mfancy_math_387,
4083 MASK_NO_FANCY_MATH_387),
4085 IX86_ATTR_YES ("ieee-fp",
4089 IX86_ATTR_YES ("inline-all-stringops",
4090 OPT_minline_all_stringops,
4091 MASK_INLINE_ALL_STRINGOPS),
4093 IX86_ATTR_YES ("inline-stringops-dynamically",
4094 OPT_minline_stringops_dynamically,
4095 MASK_INLINE_STRINGOPS_DYNAMICALLY),
4097 IX86_ATTR_NO ("align-stringops",
4098 OPT_mno_align_stringops,
4099 MASK_NO_ALIGN_STRINGOPS),
4101 IX86_ATTR_YES ("recip",
4107 /* If this is a list, recurse to get the options. */
4108 if (TREE_CODE (args) == TREE_LIST)
4112 for (; args; args = TREE_CHAIN (args))
4113 if (TREE_VALUE (args)
4114 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args),
4115 p_strings, enum_opts_set))
4121 else if (TREE_CODE (args) != STRING_CST)
4124 /* Handle multiple arguments separated by commas. */
4125 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
4127 while (next_optstr && *next_optstr != '\0')
4129 char *p = next_optstr;
4131 char *comma = strchr (next_optstr, ',');
4132 const char *opt_string;
4133 size_t len, opt_len;
4138 enum ix86_opt_type type = ix86_opt_unknown;
4144 len = comma - next_optstr;
4145 next_optstr = comma + 1;
4153 /* Recognize no-xxx. */
4154 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
4163 /* Find the option. */
4166 for (i = 0; i < ARRAY_SIZE (attrs); i++)
4168 type = attrs[i].type;
4169 opt_len = attrs[i].len;
4170 if (ch == attrs[i].string[0]
4171 && ((type != ix86_opt_str && type != ix86_opt_enum)
4174 && memcmp (p, attrs[i].string, opt_len) == 0)
4177 mask = attrs[i].mask;
4178 opt_string = attrs[i].string;
4183 /* Process the option. */
4186 error ("attribute(target(\"%s\")) is unknown", orig_p);
4190 else if (type == ix86_opt_isa)
4192 struct cl_decoded_option decoded;
4194 generate_option (opt, NULL, opt_set_p, CL_TARGET, &decoded);
4195 ix86_handle_option (&global_options, &global_options_set,
4196 &decoded, input_location);
4199 else if (type == ix86_opt_yes || type == ix86_opt_no)
4201 if (type == ix86_opt_no)
4202 opt_set_p = !opt_set_p;
4205 target_flags |= mask;
4207 target_flags &= ~mask;
4210 else if (type == ix86_opt_str)
4214 error ("option(\"%s\") was already specified", opt_string);
4218 p_strings[opt] = xstrdup (p + opt_len);
4221 else if (type == ix86_opt_enum)
4226 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
4228 set_option (&global_options, enum_opts_set, opt, value,
4229 p + opt_len, DK_UNSPECIFIED, input_location,
4233 error ("attribute(target(\"%s\")) is unknown", orig_p);
4245 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
4248 ix86_valid_target_attribute_tree (tree args)
4250 const char *orig_arch_string = ix86_arch_string;
4251 const char *orig_tune_string = ix86_tune_string;
4252 enum fpmath_unit orig_fpmath_set = global_options_set.x_ix86_fpmath;
4253 int orig_tune_defaulted = ix86_tune_defaulted;
4254 int orig_arch_specified = ix86_arch_specified;
4255 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL };
4258 struct cl_target_option *def
4259 = TREE_TARGET_OPTION (target_option_default_node);
4260 struct gcc_options enum_opts_set;
4262 memset (&enum_opts_set, 0, sizeof (enum_opts_set));
4264 /* Process each of the options on the chain. */
4265 if (! ix86_valid_target_attribute_inner_p (args, option_strings,
4269 /* If the changed options are different from the default, rerun
4270 ix86_option_override_internal, and then save the options away.
4271      The string options are attribute options, and will be undone
4272 when we copy the save structure. */
4273 if (ix86_isa_flags != def->x_ix86_isa_flags
4274 || target_flags != def->x_target_flags
4275 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
4276 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
4277 || enum_opts_set.x_ix86_fpmath)
4279 /* If we are using the default tune= or arch=, undo the string assigned,
4280 and use the default. */
4281 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
4282 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
4283 else if (!orig_arch_specified)
4284 ix86_arch_string = NULL;
4286 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
4287 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
4288 else if (orig_tune_defaulted)
4289 ix86_tune_string = NULL;
4291 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
4292 if (enum_opts_set.x_ix86_fpmath)
4293 global_options_set.x_ix86_fpmath = (enum fpmath_unit) 1;
4294 else if (!TARGET_64BIT && TARGET_SSE)
4296 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
4297 global_options_set.x_ix86_fpmath = (enum fpmath_unit) 1;
4300 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
4301 ix86_option_override_internal (false);
4303 /* Add any builtin functions with the new isa if any. */
4304 ix86_add_new_builtins (ix86_isa_flags);
4306 /* Save the current options unless we are validating options for
4308 t = build_target_option_node ();
4310 ix86_arch_string = orig_arch_string;
4311 ix86_tune_string = orig_tune_string;
4312 global_options_set.x_ix86_fpmath = orig_fpmath_set;
4314 /* Free up memory allocated to hold the strings */
4315 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4316 free (option_strings[i]);
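  /* Usage sketch, not in the original source: a declaration such as
       int foo (void) __attribute__((target("sse4.1,no-aes,arch=core2")));
     reaches this function as a TREE_LIST/STRING_CST, is split on commas by
     the helper above, and produces a target option node recording the
     per-function ISA and tuning changes.  */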
4322 /* Hook to validate attribute((target("string"))). */
4325 ix86_valid_target_attribute_p (tree fndecl,
4326 tree ARG_UNUSED (name),
4328 int ARG_UNUSED (flags))
4330 struct cl_target_option cur_target;
4332 tree old_optimize = build_optimization_node ();
4333 tree new_target, new_optimize;
4334 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4336 /* If the function changed the optimization levels as well as setting target
4337 options, start with the optimizations specified. */
4338 if (func_optimize && func_optimize != old_optimize)
4339 cl_optimization_restore (&global_options,
4340 TREE_OPTIMIZATION (func_optimize));
4342 /* The target attributes may also change some optimization flags, so update
4343 the optimization options if necessary. */
4344 cl_target_option_save (&cur_target, &global_options);
4345 new_target = ix86_valid_target_attribute_tree (args);
4346 new_optimize = build_optimization_node ();
4353 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4355 if (old_optimize != new_optimize)
4356 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4359 cl_target_option_restore (&global_options, &cur_target);
4361 if (old_optimize != new_optimize)
4362 cl_optimization_restore (&global_options,
4363 TREE_OPTIMIZATION (old_optimize));
4369 /* Hook to determine if one function can safely inline another. */
4372 ix86_can_inline_p (tree caller, tree callee)
4375 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4376 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4378 /* If callee has no option attributes, then it is ok to inline. */
4382   /* If caller has no option attributes, but callee does, then it is not ok to
4384 else if (!caller_tree)
4389 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4390 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4392       /* Callee's isa options should be a subset of the caller's, i.e. an SSE4 function
4393 	 can inline an SSE2 function but an SSE2 function can't inline an SSE4
4395 if ((caller_opts->x_ix86_isa_flags & callee_opts->x_ix86_isa_flags)
4396 != callee_opts->x_ix86_isa_flags)
4399 /* See if we have the same non-isa options. */
4400 else if (caller_opts->x_target_flags != callee_opts->x_target_flags)
4403 /* See if arch, tune, etc. are the same. */
4404 else if (caller_opts->arch != callee_opts->arch)
4407 else if (caller_opts->tune != callee_opts->tune)
4410 else if (caller_opts->x_ix86_fpmath != callee_opts->x_ix86_fpmath)
4413 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4424 /* Remember the last target of ix86_set_current_function. */
4425 static GTY(()) tree ix86_previous_fndecl;
4427 /* Establish appropriate back-end context for processing the function
4428 FNDECL. The argument might be NULL to indicate processing at top
4429 level, outside of any function scope. */
4431 ix86_set_current_function (tree fndecl)
4433 /* Only change the context if the function changes. This hook is called
4434 several times in the course of compiling a function, and we don't want to
4435 slow things down too much or call target_reinit when it isn't safe. */
4436 if (fndecl && fndecl != ix86_previous_fndecl)
4438 tree old_tree = (ix86_previous_fndecl
4439 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4442 tree new_tree = (fndecl
4443 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4446 ix86_previous_fndecl = fndecl;
4447 if (old_tree == new_tree)
4452 cl_target_option_restore (&global_options,
4453 TREE_TARGET_OPTION (new_tree));
4459 struct cl_target_option *def
4460 = TREE_TARGET_OPTION (target_option_current_node);
4462 cl_target_option_restore (&global_options, def);
4469 /* Return true if this goes in large data/bss. */
4472 ix86_in_large_data_p (tree exp)
4474 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4477 /* Functions are never large data. */
4478 if (TREE_CODE (exp) == FUNCTION_DECL)
4481 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4483 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4484 if (strcmp (section, ".ldata") == 0
4485 || strcmp (section, ".lbss") == 0)
4491 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4493 /* If this is an incomplete type with size 0, then we can't put it
4494 in data because it might be too big when completed. */
4495 if (!size || size > ix86_section_threshold)
4502 /* Switch to the appropriate section for output of DECL.
4503 DECL is either a `VAR_DECL' node or a constant of some sort.
4504 RELOC indicates whether forming the initial value of DECL requires
4505 link-time relocations. */
4507 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4511 x86_64_elf_select_section (tree decl, int reloc,
4512 unsigned HOST_WIDE_INT align)
4514 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4515 && ix86_in_large_data_p (decl))
4517 const char *sname = NULL;
4518 unsigned int flags = SECTION_WRITE;
4519 switch (categorize_decl_for_section (decl, reloc))
4524 case SECCAT_DATA_REL:
4525 sname = ".ldata.rel";
4527 case SECCAT_DATA_REL_LOCAL:
4528 sname = ".ldata.rel.local";
4530 case SECCAT_DATA_REL_RO:
4531 sname = ".ldata.rel.ro";
4533 case SECCAT_DATA_REL_RO_LOCAL:
4534 sname = ".ldata.rel.ro.local";
4538 flags |= SECTION_BSS;
4541 case SECCAT_RODATA_MERGE_STR:
4542 case SECCAT_RODATA_MERGE_STR_INIT:
4543 case SECCAT_RODATA_MERGE_CONST:
4547 case SECCAT_SRODATA:
4554 	  /* We don't split these for the medium model.  Place them into
4555 	     default sections and hope for the best.  */
4560 /* We might get called with string constants, but get_named_section
4561 doesn't like them as they are not DECLs. Also, we need to set
4562 flags in that case. */
4564 return get_section (sname, flags, NULL);
4565 return get_named_section (decl, sname, reloc);
4568 return default_elf_select_section (decl, reloc, align);
4571 /* Build up a unique section name, expressed as a
4572 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4573 RELOC indicates whether the initial value of EXP requires
4574 link-time relocations. */
4576 static void ATTRIBUTE_UNUSED
4577 x86_64_elf_unique_section (tree decl, int reloc)
4579 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4580 && ix86_in_large_data_p (decl))
4582 const char *prefix = NULL;
4583 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4584 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4586 switch (categorize_decl_for_section (decl, reloc))
4589 case SECCAT_DATA_REL:
4590 case SECCAT_DATA_REL_LOCAL:
4591 case SECCAT_DATA_REL_RO:
4592 case SECCAT_DATA_REL_RO_LOCAL:
4593 prefix = one_only ? ".ld" : ".ldata";
4596 prefix = one_only ? ".lb" : ".lbss";
4599 case SECCAT_RODATA_MERGE_STR:
4600 case SECCAT_RODATA_MERGE_STR_INIT:
4601 case SECCAT_RODATA_MERGE_CONST:
4602 prefix = one_only ? ".lr" : ".lrodata";
4604 case SECCAT_SRODATA:
4611 	  /* We don't split these for the medium model.  Place them into
4612 	     default sections and hope for the best.  */
4617 const char *name, *linkonce;
4620 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4621 name = targetm.strip_name_encoding (name);
4623 /* If we're using one_only, then there needs to be a .gnu.linkonce
4624 prefix to the section name. */
4625 linkonce = one_only ? ".gnu.linkonce" : "";
4627 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4629 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4633 default_unique_section (decl, reloc);
4636 #ifdef COMMON_ASM_OP
4637 /* This says how to output assembler code to declare an
4638 uninitialized external linkage data object.
4640    For medium model x86-64 we need to use the .largecomm directive for
4643 x86_elf_aligned_common (FILE *file,
4644 const char *name, unsigned HOST_WIDE_INT size,
4647 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4648 && size > (unsigned int)ix86_section_threshold)
4649 fputs (".largecomm\t", file);
4651 fputs (COMMON_ASM_OP, file);
4652 assemble_name (file, name);
4653 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4654 size, align / BITS_PER_UNIT);
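  /* Sample output, illustrative and not from the original source: a large
     object in the medium model is emitted as, e.g.,
       .largecomm big_buf,250000,32
     while objects below the section threshold use the ordinary
       .comm small_buf,100,4  */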
4658 /* Utility function for targets to use in implementing
4659 ASM_OUTPUT_ALIGNED_BSS. */
4662 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4663 const char *name, unsigned HOST_WIDE_INT size,
4666 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4667 && size > (unsigned int)ix86_section_threshold)
4668 switch_to_section (get_named_section (decl, ".lbss", 0));
4670 switch_to_section (bss_section);
4671 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4672 #ifdef ASM_DECLARE_OBJECT_NAME
4673 last_assemble_variable_decl = decl;
4674 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4676   /* The standard thing is to just output a label for the object.  */
4677 ASM_OUTPUT_LABEL (file, name);
4678 #endif /* ASM_DECLARE_OBJECT_NAME */
4679 ASM_OUTPUT_SKIP (file, size ? size : 1);
4682 /* Decide whether we must probe the stack before any space allocation
4683 on this target. It's essentially TARGET_STACK_PROBE except when
4684 -fstack-check causes the stack to be already probed differently. */
4687 ix86_target_stack_probe (void)
4689 /* Do not probe the stack twice if static stack checking is enabled. */
4690 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
4693 return TARGET_STACK_PROBE;
4696 /* Decide whether we can make a sibling call to a function. DECL is the
4697 declaration of the function being targeted by the call and EXP is the
4698 CALL_EXPR representing the call. */
4701 ix86_function_ok_for_sibcall (tree decl, tree exp)
4703 tree type, decl_or_type;
4706 /* If we are generating position-independent code, we cannot sibcall
4707 optimize any indirect call, or a direct call to a global function,
4708 as the PLT requires %ebx be live. (Darwin does not have a PLT.) */
4712 && (!decl || !targetm.binds_local_p (decl)))
4715 /* If we need to align the outgoing stack, then sibcalling would
4716 unalign the stack, which may break the called function. */
4717 if (ix86_minimum_incoming_stack_boundary (true)
4718 < PREFERRED_STACK_BOUNDARY)
4723 decl_or_type = decl;
4724 type = TREE_TYPE (decl);
4728 /* We're looking at the CALL_EXPR, we need the type of the function. */
4729 type = CALL_EXPR_FN (exp); /* pointer expression */
4730 type = TREE_TYPE (type); /* pointer type */
4731 type = TREE_TYPE (type); /* function type */
4732 decl_or_type = type;
4735   /* Check that the return value locations are the same.  For example,
4736      if we are returning floats on the 80387 register stack, we cannot
4737 make a sibcall from a function that doesn't return a float to a
4738 function that does or, conversely, from a function that does return
4739 a float to a function that doesn't; the necessary stack adjustment
4740 would not be executed. This is also the place we notice
4741 differences in the return value ABI. Note that it is ok for one
4742 of the functions to have void return type as long as the return
4743 value of the other is passed in a register. */
4744 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4745 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4747 if (STACK_REG_P (a) || STACK_REG_P (b))
4749 if (!rtx_equal_p (a, b))
4752 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4754 /* Disable sibcall if we need to generate vzeroupper after
4756 if (TARGET_VZEROUPPER
4757 && cfun->machine->callee_return_avx256_p
4758 && !cfun->machine->caller_return_avx256_p)
4761 else if (!rtx_equal_p (a, b))
4766 /* The SYSV ABI has more call-clobbered registers;
4767 disallow sibcalls from MS to SYSV. */
4768 if (cfun->machine->call_abi == MS_ABI
4769 && ix86_function_type_abi (type) == SYSV_ABI)
4774 /* If this call is indirect, we'll need to be able to use a
4775 call-clobbered register for the address of the target function.
4776 Make sure that all such registers are not used for passing
4777 parameters. Note that DLLIMPORT functions are indirect. */
4779 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4781 if (ix86_function_regparm (type, NULL) >= 3)
4783 /* ??? Need to count the actual number of registers to be used,
4784 not the possible number of registers. Fix later. */
4790 /* Otherwise okay. That also includes certain types of indirect calls. */
4794 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4795 and "sseregparm" calling convention attributes;
4796 arguments as in struct attribute_spec.handler. */
4799 ix86_handle_cconv_attribute (tree *node, tree name,
4801 int flags ATTRIBUTE_UNUSED,
4804 if (TREE_CODE (*node) != FUNCTION_TYPE
4805 && TREE_CODE (*node) != METHOD_TYPE
4806 && TREE_CODE (*node) != FIELD_DECL
4807 && TREE_CODE (*node) != TYPE_DECL)
4809 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4811 *no_add_attrs = true;
4815   /* Can combine regparm with all attributes but fastcall and thiscall.  */
4816 if (is_attribute_p ("regparm", name))
4820 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4822 error ("fastcall and regparm attributes are not compatible");
4825 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4827 error ("regparam and thiscall attributes are not compatible");
4830 cst = TREE_VALUE (args);
4831 if (TREE_CODE (cst) != INTEGER_CST)
4833 warning (OPT_Wattributes,
4834 "%qE attribute requires an integer constant argument",
4836 *no_add_attrs = true;
4838 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4840 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4842 *no_add_attrs = true;
4850 /* Do not warn when emulating the MS ABI. */
4851 if ((TREE_CODE (*node) != FUNCTION_TYPE
4852 && TREE_CODE (*node) != METHOD_TYPE)
4853 || ix86_function_type_abi (*node) != MS_ABI)
4854 warning (OPT_Wattributes, "%qE attribute ignored",
4856 *no_add_attrs = true;
4860 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4861 if (is_attribute_p ("fastcall", name))
4863 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4865 error ("fastcall and cdecl attributes are not compatible");
4867 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4869 error ("fastcall and stdcall attributes are not compatible");
4871 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4873 error ("fastcall and regparm attributes are not compatible");
4875 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4877 error ("fastcall and thiscall attributes are not compatible");
4881 /* Can combine stdcall with fastcall (redundant), regparm and
4883 else if (is_attribute_p ("stdcall", name))
4885 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4887 error ("stdcall and cdecl attributes are not compatible");
4889 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4891 error ("stdcall and fastcall attributes are not compatible");
4893 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4895 error ("stdcall and thiscall attributes are not compatible");
4899 /* Can combine cdecl with regparm and sseregparm. */
4900 else if (is_attribute_p ("cdecl", name))
4902 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4904 error ("stdcall and cdecl attributes are not compatible");
4906 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4908 error ("fastcall and cdecl attributes are not compatible");
4910 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4912 error ("cdecl and thiscall attributes are not compatible");
4915 else if (is_attribute_p ("thiscall", name))
4917 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4918 	warning (OPT_Wattributes, "%qE attribute is used for non-class method",
4920 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4922 error ("stdcall and thiscall attributes are not compatible");
4924 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4926 error ("fastcall and thiscall attributes are not compatible");
4928 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4930 error ("cdecl and thiscall attributes are not compatible");
4934 /* Can combine sseregparm with all attributes. */
4939 /* This function determines the calling convention from TYPE.  */
4942 ix86_get_callcvt (const_tree type)
4944 unsigned int ret = 0;
4949 return IX86_CALLCVT_CDECL;
4951 attrs = TYPE_ATTRIBUTES (type);
4952 if (attrs != NULL_TREE)
4954 if (lookup_attribute ("cdecl", attrs))
4955 ret |= IX86_CALLCVT_CDECL;
4956 else if (lookup_attribute ("stdcall", attrs))
4957 ret |= IX86_CALLCVT_STDCALL;
4958 else if (lookup_attribute ("fastcall", attrs))
4959 ret |= IX86_CALLCVT_FASTCALL;
4960 else if (lookup_attribute ("thiscall", attrs))
4961 ret |= IX86_CALLCVT_THISCALL;
4963       /* Regparm isn't allowed for thiscall and fastcall.  */
4964 if ((ret & (IX86_CALLCVT_THISCALL | IX86_CALLCVT_FASTCALL)) == 0)
4966 if (lookup_attribute ("regparm", attrs))
4967 ret |= IX86_CALLCVT_REGPARM;
4968 if (lookup_attribute ("sseregparm", attrs))
4969 ret |= IX86_CALLCVT_SSEREGPARM;
4972 if (IX86_BASE_CALLCVT(ret) != 0)
4976 is_stdarg = stdarg_p (type);
4977 if (TARGET_RTD && !is_stdarg)
4978 return IX86_CALLCVT_STDCALL | ret;
4982 || TREE_CODE (type) != METHOD_TYPE
4983 || ix86_function_type_abi (type) != MS_ABI)
4984 return IX86_CALLCVT_CDECL | ret;
4986 return IX86_CALLCVT_THISCALL;
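/* Illustrative examples, not in the original source: a 32-bit function
   type carrying the stdcall attribute yields IX86_CALLCVT_STDCALL; under
   -mrtd an unadorned non-variadic type is also treated as stdcall; the
   fall-back for everything else is IX86_CALLCVT_CDECL.  */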
4989 /* Return 0 if the attributes for two types are incompatible, 1 if they
4990 are compatible, and 2 if they are nearly compatible (which causes a
4991 warning to be generated). */
4994 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4996 unsigned int ccvt1, ccvt2;
4998 if (TREE_CODE (type1) != FUNCTION_TYPE
4999 && TREE_CODE (type1) != METHOD_TYPE)
5002 ccvt1 = ix86_get_callcvt (type1);
5003 ccvt2 = ix86_get_callcvt (type2);
5006 if (ix86_function_regparm (type1, NULL)
5007 != ix86_function_regparm (type2, NULL))
5013 /* Return the regparm value for a function with the indicated TYPE and DECL.
5014 DECL may be NULL when calling function indirectly
5015 or considering a libcall. */
5018 ix86_function_regparm (const_tree type, const_tree decl)
5025 return (ix86_function_type_abi (type) == SYSV_ABI
5026 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
5027 ccvt = ix86_get_callcvt (type);
5028 regparm = ix86_regparm;
5030 if ((ccvt & IX86_CALLCVT_REGPARM) != 0)
5032 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
5035 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
5039 else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
5041 else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
5044 /* Use register calling convention for local functions when possible. */
5046 && TREE_CODE (decl) == FUNCTION_DECL
5048 && !(profile_flag && !flag_fentry))
5050 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5051 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
5052 if (i && i->local && i->can_change_signature)
5054 int local_regparm, globals = 0, regno;
5056 /* Make sure no regparm register is taken by a
5057 fixed register variable. */
5058 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
5059 if (fixed_regs[local_regparm])
5062 /* We don't want to use regparm(3) for nested functions as
5063 these use a static chain pointer in the third argument. */
5064 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
5067 /* In 32-bit mode save a register for the split stack. */
5068 if (!TARGET_64BIT && local_regparm == 3 && flag_split_stack)
5071 /* Each fixed register usage increases register pressure,
5072 	     so fewer registers should be used for argument passing.
5073 	     This functionality can be overridden by an explicit
5075 for (regno = 0; regno <= DI_REG; regno++)
5076 if (fixed_regs[regno])
5080 = globals < local_regparm ? local_regparm - globals : 0;
5082 if (local_regparm > regparm)
5083 regparm = local_regparm;
5090 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
5091 DFmode (2) arguments in SSE registers for a function with the
5092 indicated TYPE and DECL. DECL may be NULL when calling function
5093 indirectly or considering a libcall. Otherwise return 0. */
5096 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
5098 gcc_assert (!TARGET_64BIT);
5100 /* Use SSE registers to pass SFmode and DFmode arguments if requested
5101 by the sseregparm attribute. */
5102 if (TARGET_SSEREGPARM
5103 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
5110 error ("calling %qD with attribute sseregparm without "
5111 "SSE/SSE2 enabled", decl);
5113 error ("calling %qT with attribute sseregparm without "
5114 "SSE/SSE2 enabled", type);
5122 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
5123 (and DFmode for SSE2) arguments in SSE registers. */
5124 if (decl && TARGET_SSE_MATH && optimize
5125 && !(profile_flag && !flag_fentry))
5127 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
5128 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
5129 if (i && i->local && i->can_change_signature)
5130 return TARGET_SSE2 ? 2 : 1;
5136 /* Return true if EAX is live at the start of the function. Used by
5137 ix86_expand_prologue to determine if we need special help before
5138 calling allocate_stack_worker. */
5141 ix86_eax_live_at_start_p (void)
5143 /* Cheat. Don't bother working forward from ix86_function_regparm
5144 to the function type to whether an actual argument is located in
5145 eax. Instead just look at cfg info, which is still close enough
5146 to correct at this point. This gives false positives for broken
5147 functions that might use uninitialized data that happens to be
5148 allocated in eax, but who cares? */
5149 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
5153 ix86_keep_aggregate_return_pointer (tree fntype)
5159 attr = lookup_attribute ("callee_pop_aggregate_return",
5160 TYPE_ATTRIBUTES (fntype));
5162 return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0);
5164 /* For 32-bit MS-ABI the default is to keep aggregate
5166 if (ix86_function_type_abi (fntype) == MS_ABI)
5169 return KEEP_AGGREGATE_RETURN_POINTER != 0;
5172 /* Value is the number of bytes of arguments automatically
5173 popped when returning from a subroutine call.
5174 FUNDECL is the declaration node of the function (as a tree),
5175 FUNTYPE is the data type of the function (as a tree),
5176 or for a library call it is an identifier node for the subroutine name.
5177 SIZE is the number of bytes of arguments passed on the stack.
5179 On the 80386, the RTD insn may be used to pop them if the number
5180 of args is fixed, but if the number is variable then the caller
5181 must pop them all. RTD can't be used for library calls now
5182 because the library is compiled with the Unix compiler.
5183 Use of RTD is a selectable option, since it is incompatible with
5184 standard Unix calling sequences. If the option is not selected,
5185 the caller must always pop the args.
5187 The attribute stdcall is equivalent to RTD on a per module basis. */
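/* Illustrative example, not in the original source: for
     void __attribute__((stdcall)) f (int a, int b);
   a 32-bit callee pops its 8 bytes of arguments itself ("ret 8"), so
   this function returns 8; a variadic or plain cdecl function returns 0
   and the caller adjusts the stack instead.  */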
5190 ix86_return_pops_args (tree fundecl, tree funtype, int size)
5194 /* None of the 64-bit ABIs pop arguments. */
5198 ccvt = ix86_get_callcvt (funtype);
5200 if ((ccvt & (IX86_CALLCVT_STDCALL | IX86_CALLCVT_FASTCALL
5201 | IX86_CALLCVT_THISCALL)) != 0
5202 && ! stdarg_p (funtype))
5205 /* Lose any fake structure return argument if it is passed on the stack. */
5206 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
5207 && !ix86_keep_aggregate_return_pointer (funtype))
5209 int nregs = ix86_function_regparm (funtype, fundecl);
5211 return GET_MODE_SIZE (Pmode);
5217 /* Argument support functions. */
5219 /* Return true when register may be used to pass function parameters. */
5221 ix86_function_arg_regno_p (int regno)
5224 const int *parm_regs;
5229 return (regno < REGPARM_MAX
5230 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
5232 return (regno < REGPARM_MAX
5233 || (TARGET_MMX && MMX_REGNO_P (regno)
5234 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
5235 || (TARGET_SSE && SSE_REGNO_P (regno)
5236 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
5241 if (SSE_REGNO_P (regno) && TARGET_SSE)
5246 if (TARGET_SSE && SSE_REGNO_P (regno)
5247 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
5251 /* TODO: The function should depend on current function ABI but
5252 builtins.c would need updating then. Therefore we use the
5255 /* RAX is used as hidden argument to va_arg functions. */
5256 if (ix86_abi == SYSV_ABI && regno == AX_REG)
5259 if (ix86_abi == MS_ABI)
5260 parm_regs = x86_64_ms_abi_int_parameter_registers;
5262 parm_regs = x86_64_int_parameter_registers;
5263 for (i = 0; i < (ix86_abi == MS_ABI
5264 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
5265 if (regno == parm_regs[i])
5270 /* Return true if we do not know how to pass TYPE solely in registers.  */
5273 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
5275 if (must_pass_in_stack_var_size_or_pad (mode, type))
5278 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
5279 The layout_type routine is crafty and tries to trick us into passing
5280 currently unsupported vector types on the stack by using TImode. */
5281 return (!TARGET_64BIT && mode == TImode
5282 && type && TREE_CODE (type) != VECTOR_TYPE);
5285 /* It returns the size, in bytes, of the area reserved for arguments passed
5286    in registers for the function represented by fndecl, depending on the used
5289 ix86_reg_parm_stack_space (const_tree fndecl)
5291 enum calling_abi call_abi = SYSV_ABI;
5292 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
5293 call_abi = ix86_function_abi (fndecl);
5295 call_abi = ix86_function_type_abi (fndecl);
5296 if (TARGET_64BIT && call_abi == MS_ABI)
5301 /* Returns SYSV_ABI or MS_ABI, depending on fntype, specifying the
5304 ix86_function_type_abi (const_tree fntype)
5306 if (fntype != NULL_TREE && TYPE_ATTRIBUTES (fntype) != NULL_TREE)
5308 enum calling_abi abi = ix86_abi;
5309 if (abi == SYSV_ABI)
5311 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
5314 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
5322 ix86_function_ms_hook_prologue (const_tree fn)
5324 if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
5326 if (decl_function_context (fn) != NULL_TREE)
5327 error_at (DECL_SOURCE_LOCATION (fn),
5328 "ms_hook_prologue is not compatible with nested function");
5335 static enum calling_abi
5336 ix86_function_abi (const_tree fndecl)
5340 return ix86_function_type_abi (TREE_TYPE (fndecl));
5343 /* Returns SYSV_ABI or MS_ABI, depending on cfun, specifying the
5346 ix86_cfun_abi (void)
5350 return cfun->machine->call_abi;
5353 /* Write the extra assembler code needed to declare a function properly. */
5356 ix86_asm_output_function_label (FILE *asm_out_file, const char *fname,
5359 bool is_ms_hook = ix86_function_ms_hook_prologue (decl);
5363 int i, filler_count = (TARGET_64BIT ? 32 : 16);
5364 unsigned int filler_cc = 0xcccccccc;
5366 for (i = 0; i < filler_count; i += 4)
5367 fprintf (asm_out_file, ASM_LONG " %#x\n", filler_cc);
5370 #ifdef SUBTARGET_ASM_UNWIND_INIT
5371 SUBTARGET_ASM_UNWIND_INIT (asm_out_file);
5374 ASM_OUTPUT_LABEL (asm_out_file, fname);
5376 /* Output magic byte marker, if hot-patch attribute is set. */
5381 /* leaq [%rsp + 0], %rsp */
5382 asm_fprintf (asm_out_file, ASM_BYTE
5383 "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n");
5387 /* movl.s %edi, %edi
5389 movl.s %esp, %ebp */
5390 asm_fprintf (asm_out_file, ASM_BYTE
5391 "0x8b, 0xff, 0x55, 0x8b, 0xec\n");
5397 extern void init_regs (void);
5399 /* Implementation of the call ABI switching target hook.  The call
5400    register sets specific to FNDECL are set up here.  See also
5401 ix86_conditional_register_usage for more details. */
5403 ix86_call_abi_override (const_tree fndecl)
5405 if (fndecl == NULL_TREE)
5406 cfun->machine->call_abi = ix86_abi;
5408 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
5411 /* The 64-bit MS and SYSV ABIs have different sets of call-used registers.  Avoid
5412    expensive re-initialization of init_regs each time we switch function context,
5413 since this is needed only during RTL expansion. */
5415 ix86_maybe_switch_abi (void)
5418 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5422 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5423 for a call to a function whose data type is FNTYPE.
5424 For a library call, FNTYPE is 0. */
5427 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5428 tree fntype, /* tree ptr for function decl */
5429 rtx libname, /* SYMBOL_REF of library name or 0 */
5433 struct cgraph_local_info *i;
5436 memset (cum, 0, sizeof (*cum));
5438 /* Initialize for the current callee. */
5441 cfun->machine->callee_pass_avx256_p = false;
5442 cfun->machine->callee_return_avx256_p = false;
5447 i = cgraph_local_info (fndecl);
5448 cum->call_abi = ix86_function_abi (fndecl);
5449 fnret_type = TREE_TYPE (TREE_TYPE (fndecl));
5454 cum->call_abi = ix86_function_type_abi (fntype);
5456 fnret_type = TREE_TYPE (fntype);
5461 if (TARGET_VZEROUPPER && fnret_type)
5463 rtx fnret_value = ix86_function_value (fnret_type, fntype,
5465 if (function_pass_avx256_p (fnret_value))
5467 /* The return value of this function uses 256bit AVX modes. */
5469 cfun->machine->callee_return_avx256_p = true;
5471 cfun->machine->caller_return_avx256_p = true;
5475 cum->caller = caller;
5477 /* Set up the number of registers to use for passing arguments. */
5479 if (TARGET_64BIT && cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5480 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5481 "or subtarget optimization implying it");
5482 cum->nregs = ix86_regparm;
5485 cum->nregs = (cum->call_abi == SYSV_ABI
5486 ? X86_64_REGPARM_MAX
5487 : X86_64_MS_REGPARM_MAX);
5491 cum->sse_nregs = SSE_REGPARM_MAX;
5494 cum->sse_nregs = (cum->call_abi == SYSV_ABI
5495 ? X86_64_SSE_REGPARM_MAX
5496 : X86_64_MS_SSE_REGPARM_MAX);
5500 cum->mmx_nregs = MMX_REGPARM_MAX;
5501 cum->warn_avx = true;
5502 cum->warn_sse = true;
5503 cum->warn_mmx = true;
5505   /* Because types might mismatch between caller and callee, we need to
5506      use the actual type of the function for local calls.
5507      FIXME: cgraph_analyze can be told to actually record if the function uses
5508      va_start, so for local functions maybe_vaarg can be made aggressive
5510      FIXME: once the type system is fixed, we won't need this code anymore.  */
5511 if (i && i->local && i->can_change_signature)
5512 fntype = TREE_TYPE (fndecl);
5513 cum->maybe_vaarg = (fntype
5514 ? (!prototype_p (fntype) || stdarg_p (fntype))
5519 /* If there are variable arguments, then we won't pass anything
5520 in registers in 32-bit mode. */
5521 if (stdarg_p (fntype))
5532 /* Use ecx and edx registers if function has fastcall attribute,
5533 else look for regparm information. */
5536 unsigned int ccvt = ix86_get_callcvt (fntype);
5537 if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
5540 cum->fastcall = 1; /* Same first register as in fastcall. */
5542 else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
5548 cum->nregs = ix86_function_regparm (fntype, fndecl);
5551 /* Set up the number of SSE registers used for passing SFmode
5552 and DFmode arguments. Warn for mismatching ABI. */
5553 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5557 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5558 But in the case of vector types, it is some vector mode.
5560 When we have only some of our vector isa extensions enabled, then there
5561 are some modes for which vector_mode_supported_p is false. For these
5562 modes, the generic vector support in gcc will choose some non-vector mode
5563 in order to implement the type. By computing the natural mode, we'll
5564 select the proper ABI location for the operand and not depend on whatever
5565 the middle-end decides to do with these vector types.
5567    The middle-end can't deal with vector types > 16 bytes.  In this
5568    case, we return the original mode and warn about the ABI change if CUM isn't
5571 static enum machine_mode
5572 type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
5574 enum machine_mode mode = TYPE_MODE (type);
5576 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5578 HOST_WIDE_INT size = int_size_in_bytes (type);
5579 if ((size == 8 || size == 16 || size == 32)
5580 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5581 && TYPE_VECTOR_SUBPARTS (type) > 1)
5583 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5585 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5586 mode = MIN_MODE_VECTOR_FLOAT;
5588 mode = MIN_MODE_VECTOR_INT;
5590 /* Get the mode which has this inner mode and number of units. */
5591 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5592 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5593 && GET_MODE_INNER (mode) == innermode)
5595 if (size == 32 && !TARGET_AVX)
5597 static bool warnedavx;
5604 warning (0, "AVX vector argument without AVX "
5605 "enabled changes the ABI");
5607 return TYPE_MODE (type);
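/* For illustration (hypothetical user type): given

     typedef float v4sf __attribute__ ((vector_size (16)));

   type_natural_mode computes V4SFmode for a v4sf argument even when
   SSE is disabled and TYPE_MODE fell back to a non-vector mode, so
   the ABI slot assignment does not depend on that fallback.  */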
5620 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5621 this may not agree with the mode that the type system has chosen for the
5622 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5623 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5626 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5631 if (orig_mode != BLKmode)
5632 tmp = gen_rtx_REG (orig_mode, regno);
5635 tmp = gen_rtx_REG (mode, regno);
5636 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5637 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5643 /* x86-64 register passing implementation. See the x86-64 ABI for details.
5644 The goal of this code is to classify each eightbyte of an incoming argument
5645 by register class and assign registers accordingly. */
5647 /* Return the union class of CLASS1 and CLASS2.
5648 See the x86-64 PS ABI for details. */
5650 static enum x86_64_reg_class
5651 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5653 /* Rule #1: If both classes are equal, this is the resulting class. */
5654 if (class1 == class2)
5657 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5659 if (class1 == X86_64_NO_CLASS)
5661 if (class2 == X86_64_NO_CLASS)
5664 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5665 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5666 return X86_64_MEMORY_CLASS;
5668 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5669 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5670 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5671 return X86_64_INTEGERSI_CLASS;
5672 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5673 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5674 return X86_64_INTEGER_CLASS;
5676 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5678 if (class1 == X86_64_X87_CLASS
5679 || class1 == X86_64_X87UP_CLASS
5680 || class1 == X86_64_COMPLEX_X87_CLASS
5681 || class2 == X86_64_X87_CLASS
5682 || class2 == X86_64_X87UP_CLASS
5683 || class2 == X86_64_COMPLEX_X87_CLASS)
5684 return X86_64_MEMORY_CLASS;
5686 /* Rule #6: Otherwise class SSE is used. */
5687 return X86_64_SSE_CLASS;
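/* Worked example of the rules above (hypothetical struct): for

     struct s { int i; float f; };   (8 bytes, a single eightbyte)

   the int classifies as X86_64_INTEGERSI_CLASS and the float at bit
   offset 32 as X86_64_SSE_CLASS; rule #4 merges them to
   X86_64_INTEGER_CLASS, so the struct travels in an integer
   register.  */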
5690 /* Classify the argument of type TYPE and mode MODE.
5691 CLASSES will be filled by the register class used to pass each word
5692 of the operand. The number of words is returned. In case the parameter
5693 should be passed in memory, 0 is returned. As a special case for zero
5694 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5696 BIT_OFFSET is used internally for handling records and specifies the
5697 offset in bits modulo 256 to avoid overflow cases.
5699 See the x86-64 PS ABI for details.
5703 classify_argument (enum machine_mode mode, const_tree type,
5704 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5706 HOST_WIDE_INT bytes =
5707 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5708 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5710 /* Variable sized entities are always passed/returned in memory. */
5714 if (mode != VOIDmode
5715 && targetm.calls.must_pass_in_stack (mode, type))
5718 if (type && AGGREGATE_TYPE_P (type))
5722 enum x86_64_reg_class subclasses[MAX_CLASSES];
5724 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5728 for (i = 0; i < words; i++)
5729 classes[i] = X86_64_NO_CLASS;
5731 /* Zero-sized arrays or structures are NO_CLASS. We return 0 to
5732 signal the memory class, so handle this as a special case. */
5735 classes[0] = X86_64_NO_CLASS;
5739 /* Classify each field of record and merge classes. */
5740 switch (TREE_CODE (type))
5743 /* And now merge the fields of structure. */
5744 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5746 if (TREE_CODE (field) == FIELD_DECL)
5750 if (TREE_TYPE (field) == error_mark_node)
5753 /* Bitfields are always classified as integer. Handle them
5754 early, since later code would consider them to be
5755 misaligned integers. */
5756 if (DECL_BIT_FIELD (field))
5758 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5759 i < ((int_bit_position (field) + (bit_offset % 64))
5760 + tree_low_cst (DECL_SIZE (field), 0)
5763 merge_classes (X86_64_INTEGER_CLASS,
5770 type = TREE_TYPE (field);
5772 /* Flexible array member is ignored. */
5773 if (TYPE_MODE (type) == BLKmode
5774 && TREE_CODE (type) == ARRAY_TYPE
5775 && TYPE_SIZE (type) == NULL_TREE
5776 && TYPE_DOMAIN (type) != NULL_TREE
5777 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5782 if (!warned && warn_psabi)
5785 inform (input_location,
5786 "the ABI of passing struct with"
5787 " a flexible array member has"
5788 " changed in GCC 4.4");
5792 num = classify_argument (TYPE_MODE (type), type,
5794 (int_bit_position (field)
5795 + bit_offset) % 256);
5798 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5799 for (i = 0; i < num && (i + pos) < words; i++)
5801 merge_classes (subclasses[i], classes[i + pos]);
5808 /* Arrays are handled as small records. */
5811 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5812 TREE_TYPE (type), subclasses, bit_offset);
5816 /* The partial classes are now full classes. */
5817 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5818 subclasses[0] = X86_64_SSE_CLASS;
5819 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5820 && !((bit_offset % 64) == 0 && bytes == 4))
5821 subclasses[0] = X86_64_INTEGER_CLASS;
5823 for (i = 0; i < words; i++)
5824 classes[i] = subclasses[i % num];
5829 case QUAL_UNION_TYPE:
5830 /* Unions are similar to RECORD_TYPE but offset is always 0.
5832 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5834 if (TREE_CODE (field) == FIELD_DECL)
5838 if (TREE_TYPE (field) == error_mark_node)
5841 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5842 TREE_TYPE (field), subclasses,
5846 for (i = 0; i < num; i++)
5847 classes[i] = merge_classes (subclasses[i], classes[i]);
5858 /* When size > 16 bytes, if the first class isn't
5859 X86_64_SSE_CLASS or any of the others isn't
5860 X86_64_SSEUP_CLASS, everything should be passed in
5862 if (classes[0] != X86_64_SSE_CLASS)
5865 for (i = 1; i < words; i++)
5866 if (classes[i] != X86_64_SSEUP_CLASS)
5870 /* Final merger cleanup. */
5871 for (i = 0; i < words; i++)
5873 /* If one class is MEMORY, everything should be passed in
5875 if (classes[i] == X86_64_MEMORY_CLASS)
5878 /* The X86_64_SSEUP_CLASS should always be preceded by
5879 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5880 if (classes[i] == X86_64_SSEUP_CLASS
5881 && classes[i - 1] != X86_64_SSE_CLASS
5882 && classes[i - 1] != X86_64_SSEUP_CLASS)
5884 /* The first one should never be X86_64_SSEUP_CLASS. */
5885 gcc_assert (i != 0);
5886 classes[i] = X86_64_SSE_CLASS;
5889 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5890 everything should be passed in memory. */
5891 if (classes[i] == X86_64_X87UP_CLASS
5892 && (classes[i - 1] != X86_64_X87_CLASS))
5896 /* The first one should never be X86_64_X87UP_CLASS. */
5897 gcc_assert (i != 0);
5898 if (!warned && warn_psabi)
5901 inform (input_location,
5902 "the ABI of passing union with long double"
5903 " has changed in GCC 4.4");
5911 /* Compute the alignment needed. We align all types to their natural
5912 boundaries, with the exception of XFmode, which is aligned to 64 bits. */
5913 if (mode != VOIDmode && mode != BLKmode)
5915 int mode_alignment = GET_MODE_BITSIZE (mode);
5918 mode_alignment = 128;
5919 else if (mode == XCmode)
5920 mode_alignment = 256;
5921 if (COMPLEX_MODE_P (mode))
5922 mode_alignment /= 2;
5923 /* Misaligned fields are always returned in memory. */
5924 if (bit_offset % mode_alignment)
5928 /* For V1xx modes, just use the base mode. */
5929 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5930 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5931 mode = GET_MODE_INNER (mode);
5933 /* Classification of atomic types. */
5938 classes[0] = X86_64_SSE_CLASS;
5941 classes[0] = X86_64_SSE_CLASS;
5942 classes[1] = X86_64_SSEUP_CLASS;
5952 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
5956 classes[0] = X86_64_INTEGERSI_CLASS;
5959 else if (size <= 64)
5961 classes[0] = X86_64_INTEGER_CLASS;
5964 else if (size <= 64+32)
5966 classes[0] = X86_64_INTEGER_CLASS;
5967 classes[1] = X86_64_INTEGERSI_CLASS;
5970 else if (size <= 64+64)
5972 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5980 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5984 /* OImode shouldn't be used directly. */
5989 if (!(bit_offset % 64))
5990 classes[0] = X86_64_SSESF_CLASS;
5992 classes[0] = X86_64_SSE_CLASS;
5995 classes[0] = X86_64_SSEDF_CLASS;
5998 classes[0] = X86_64_X87_CLASS;
5999 classes[1] = X86_64_X87UP_CLASS;
6002 classes[0] = X86_64_SSE_CLASS;
6003 classes[1] = X86_64_SSEUP_CLASS;
6006 classes[0] = X86_64_SSE_CLASS;
6007 if (!(bit_offset % 64))
6013 if (!warned && warn_psabi)
6016 inform (input_location,
6017 "the ABI of passing structure with complex float"
6018 " member has changed in GCC 4.4");
6020 classes[1] = X86_64_SSESF_CLASS;
6024 classes[0] = X86_64_SSEDF_CLASS;
6025 classes[1] = X86_64_SSEDF_CLASS;
6028 classes[0] = X86_64_COMPLEX_X87_CLASS;
6031 /* This mode is larger than 16 bytes. */
6039 classes[0] = X86_64_SSE_CLASS;
6040 classes[1] = X86_64_SSEUP_CLASS;
6041 classes[2] = X86_64_SSEUP_CLASS;
6042 classes[3] = X86_64_SSEUP_CLASS;
6050 classes[0] = X86_64_SSE_CLASS;
6051 classes[1] = X86_64_SSEUP_CLASS;
6059 classes[0] = X86_64_SSE_CLASS;
6065 gcc_assert (VECTOR_MODE_P (mode));
6070 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
6072 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
6073 classes[0] = X86_64_INTEGERSI_CLASS;
6075 classes[0] = X86_64_INTEGER_CLASS;
6076 classes[1] = X86_64_INTEGER_CLASS;
6077 return 1 + (bytes > 8);
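/* A worked example of the classification above, matching the SysV
   psABI (hypothetical struct):

     struct s { double d; long l; };   (16 bytes, two eightbytes)

   classifies as { X86_64_SSEDF_CLASS, X86_64_INTEGER_CLASS } and 2
   is returned, so as a first argument D is passed in %xmm0 and L in
   %rdi.  */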
6081 /* Examine the argument and set the number of registers required in each
6082 class. Return 0 iff the parameter should be passed in memory. */
6084 examine_argument (enum machine_mode mode, const_tree type, int in_return,
6085 int *int_nregs, int *sse_nregs)
6087 enum x86_64_reg_class regclass[MAX_CLASSES];
6088 int n = classify_argument (mode, type, regclass, 0);
6094 for (n--; n >= 0; n--)
6095 switch (regclass[n])
6097 case X86_64_INTEGER_CLASS:
6098 case X86_64_INTEGERSI_CLASS:
6101 case X86_64_SSE_CLASS:
6102 case X86_64_SSESF_CLASS:
6103 case X86_64_SSEDF_CLASS:
6106 case X86_64_NO_CLASS:
6107 case X86_64_SSEUP_CLASS:
6109 case X86_64_X87_CLASS:
6110 case X86_64_X87UP_CLASS:
6114 case X86_64_COMPLEX_X87_CLASS:
6115 return in_return ? 2 : 0;
6116 case X86_64_MEMORY_CLASS:
6122 /* Construct container for the argument used by GCC interface. See
6123 FUNCTION_ARG for the detailed description. */
6126 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
6127 const_tree type, int in_return, int nintregs, int nsseregs,
6128 const int *intreg, int sse_regno)
6130 /* The following variables hold the static issued_error state. */
6131 static bool issued_sse_arg_error;
6132 static bool issued_sse_ret_error;
6133 static bool issued_x87_ret_error;
6135 enum machine_mode tmpmode;
6137 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
6138 enum x86_64_reg_class regclass[MAX_CLASSES];
6142 int needed_sseregs, needed_intregs;
6143 rtx exp[MAX_CLASSES];
6146 n = classify_argument (mode, type, regclass, 0);
6149 if (!examine_argument (mode, type, in_return, &needed_intregs,
6152 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
6155 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
6156 some less clueful developer tries to use floating-point anyway. */
6157 if (needed_sseregs && !TARGET_SSE)
6161 if (!issued_sse_ret_error)
6163 error ("SSE register return with SSE disabled");
6164 issued_sse_ret_error = true;
6167 else if (!issued_sse_arg_error)
6169 error ("SSE register argument with SSE disabled");
6170 issued_sse_arg_error = true;
6175 /* Likewise, error if the ABI requires us to return values in the
6176 x87 registers and the user specified -mno-80387. */
6177 if (!TARGET_80387 && in_return)
6178 for (i = 0; i < n; i++)
6179 if (regclass[i] == X86_64_X87_CLASS
6180 || regclass[i] == X86_64_X87UP_CLASS
6181 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
6183 if (!issued_x87_ret_error)
6185 error ("x87 register return with x87 disabled");
6186 issued_x87_ret_error = true;
6191 /* First construct simple cases. Avoid SCmode, since we want to use
6192 a single register to pass this type. */
6193 if (n == 1 && mode != SCmode)
6194 switch (regclass[0])
6196 case X86_64_INTEGER_CLASS:
6197 case X86_64_INTEGERSI_CLASS:
6198 return gen_rtx_REG (mode, intreg[0]);
6199 case X86_64_SSE_CLASS:
6200 case X86_64_SSESF_CLASS:
6201 case X86_64_SSEDF_CLASS:
6202 if (mode != BLKmode)
6203 return gen_reg_or_parallel (mode, orig_mode,
6204 SSE_REGNO (sse_regno));
6206 case X86_64_X87_CLASS:
6207 case X86_64_COMPLEX_X87_CLASS:
6208 return gen_rtx_REG (mode, FIRST_STACK_REG);
6209 case X86_64_NO_CLASS:
6210 /* Zero-sized array, struct or class. */
6215 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
6216 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
6217 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6219 && regclass[0] == X86_64_SSE_CLASS
6220 && regclass[1] == X86_64_SSEUP_CLASS
6221 && regclass[2] == X86_64_SSEUP_CLASS
6222 && regclass[3] == X86_64_SSEUP_CLASS
6224 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
6227 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
6228 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
6229 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
6230 && regclass[1] == X86_64_INTEGER_CLASS
6231 && (mode == CDImode || mode == TImode || mode == TFmode)
6232 && intreg[0] + 1 == intreg[1])
6233 return gen_rtx_REG (mode, intreg[0]);
6235 /* Otherwise figure out the entries of the PARALLEL. */
6236 for (i = 0; i < n; i++)
6240 switch (regclass[i])
6242 case X86_64_NO_CLASS:
6244 case X86_64_INTEGER_CLASS:
6245 case X86_64_INTEGERSI_CLASS:
6246 /* Merge TImodes on aligned occasions here too. */
6247 if (i * 8 + 8 > bytes)
6248 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
6249 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
6253 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
6254 if (tmpmode == BLKmode)
6256 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6257 gen_rtx_REG (tmpmode, *intreg),
6261 case X86_64_SSESF_CLASS:
6262 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6263 gen_rtx_REG (SFmode,
6264 SSE_REGNO (sse_regno)),
6268 case X86_64_SSEDF_CLASS:
6269 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6270 gen_rtx_REG (DFmode,
6271 SSE_REGNO (sse_regno)),
6275 case X86_64_SSE_CLASS:
6283 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
6293 && regclass[1] == X86_64_SSEUP_CLASS
6294 && regclass[2] == X86_64_SSEUP_CLASS
6295 && regclass[3] == X86_64_SSEUP_CLASS);
6302 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
6303 gen_rtx_REG (tmpmode,
6304 SSE_REGNO (sse_regno)),
6313 /* Empty aligned struct, union or class. */
6317 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
6318 for (i = 0; i < nexps; i++)
6319 XVECEXP (ret, 0, i) = exp [i];
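/* Continuing the struct { double d; long l; } example, and assuming
   %xmm0 and %rdi are the next free registers, the loop above builds

     (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                (expr_list (reg:DI rdi) (const_int 8))])

   i.e. the double at byte offset 0 travels in an SSE register and
   the long at byte offset 8 in an integer register.  */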
6323 /* Update the data in CUM to advance over an argument of mode MODE
6324 and data type TYPE. (TYPE is null for libcalls where that information
6325 may not be available.) */
6328 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6329 const_tree type, HOST_WIDE_INT bytes,
6330 HOST_WIDE_INT words)
6346 cum->words += words;
6347 cum->nregs -= words;
6348 cum->regno += words;
6350 if (cum->nregs <= 0)
6358 /* OImode shouldn't be used directly. */
6362 if (cum->float_in_sse < 2)
6365 if (cum->float_in_sse < 1)
6382 if (!type || !AGGREGATE_TYPE_P (type))
6384 cum->sse_words += words;
6385 cum->sse_nregs -= 1;
6386 cum->sse_regno += 1;
6387 if (cum->sse_nregs <= 0)
6401 if (!type || !AGGREGATE_TYPE_P (type))
6403 cum->mmx_words += words;
6404 cum->mmx_nregs -= 1;
6405 cum->mmx_regno += 1;
6406 if (cum->mmx_nregs <= 0)
6417 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6418 const_tree type, HOST_WIDE_INT words, bool named)
6420 int int_nregs, sse_nregs;
6422 /* Unnamed 256bit vector mode parameters are passed on the stack. */
6423 if (!named && VALID_AVX256_REG_MODE (mode))
6426 if (examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
6427 && sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6429 cum->nregs -= int_nregs;
6430 cum->sse_nregs -= sse_nregs;
6431 cum->regno += int_nregs;
6432 cum->sse_regno += sse_nregs;
6436 int align = ix86_function_arg_boundary (mode, type) / BITS_PER_WORD;
6437 cum->words = (cum->words + align - 1) & ~(align - 1);
6438 cum->words += words;
6443 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6444 HOST_WIDE_INT words)
6446 /* Otherwise, this should be passed indirectly. */
6447 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6449 cum->words += words;
6457 /* Update the data in CUM to advance over an argument of mode MODE and
6458 data type TYPE. (TYPE is null for libcalls where that information
6459 may not be available.) */
6462 ix86_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
6463 const_tree type, bool named)
6465 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6466 HOST_WIDE_INT bytes, words;
6468 if (mode == BLKmode)
6469 bytes = int_size_in_bytes (type);
6471 bytes = GET_MODE_SIZE (mode);
6472 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6475 mode = type_natural_mode (type, NULL);
6477 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6478 function_arg_advance_ms_64 (cum, bytes, words);
6479 else if (TARGET_64BIT)
6480 function_arg_advance_64 (cum, mode, type, words, named);
6482 function_arg_advance_32 (cum, mode, type, bytes, words);
6485 /* Define where to put the arguments to a function.
6486 Value is zero to push the argument on the stack,
6487 or a hard register in which to store the argument.
6489 MODE is the argument's machine mode.
6490 TYPE is the data type of the argument (as a tree).
6491 This is null for libcalls where that information may
6493 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6494 the preceding args and about the function being called.
6495 NAMED is nonzero if this argument is a named parameter
6496 (otherwise it is an extra parameter matching an ellipsis). */
6499 function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6500 enum machine_mode orig_mode, const_tree type,
6501 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
6503 static bool warnedsse, warnedmmx;
6505 /* Avoid the AL settings for the Unix64 ABI. */
6506 if (mode == VOIDmode)
6522 if (words <= cum->nregs)
6524 int regno = cum->regno;
6526 /* Fastcall allocates the first two DWORD (SImode) or
6527 smaller arguments to ECX and EDX if it isn't an
6533 || (type && AGGREGATE_TYPE_P (type)))
6536 /* ECX, not EAX, is the first allocated register. */
6537 if (regno == AX_REG)
6540 return gen_rtx_REG (mode, regno);
6545 if (cum->float_in_sse < 2)
6548 if (cum->float_in_sse < 1)
6552 /* In 32bit, we pass TImode in xmm registers. */
6559 if (!type || !AGGREGATE_TYPE_P (type))
6561 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6564 warning (0, "SSE vector argument without SSE enabled "
6568 return gen_reg_or_parallel (mode, orig_mode,
6569 cum->sse_regno + FIRST_SSE_REG);
6574 /* OImode shouldn't be used directly. */
6583 if (!type || !AGGREGATE_TYPE_P (type))
6586 return gen_reg_or_parallel (mode, orig_mode,
6587 cum->sse_regno + FIRST_SSE_REG);
6597 if (!type || !AGGREGATE_TYPE_P (type))
6599 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6602 warning (0, "MMX vector argument without MMX enabled "
6606 return gen_reg_or_parallel (mode, orig_mode,
6607 cum->mmx_regno + FIRST_MMX_REG);
6616 function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6617 enum machine_mode orig_mode, const_tree type, bool named)
6619 /* Handle a hidden AL argument containing number of registers
6620 for varargs x86-64 functions. */
6621 if (mode == VOIDmode)
6622 return GEN_INT (cum->maybe_vaarg
6623 ? (cum->sse_nregs < 0
6624 ? X86_64_SSE_REGPARM_MAX
6639 /* Unnamed 256bit vector mode parameters are passed on the stack. */
6645 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6647 &x86_64_int_parameter_registers [cum->regno],
6652 function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6653 enum machine_mode orig_mode, bool named,
6654 HOST_WIDE_INT bytes)
6658 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6659 We use a value of -2 to specify that the current function call is MS ABI. */
6660 if (mode == VOIDmode)
6661 return GEN_INT (-2);
6663 /* If we've run out of registers, it goes on the stack. */
6664 if (cum->nregs == 0)
6667 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6669 /* Only floating point modes are passed in anything but integer regs. */
6670 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6673 regno = cum->regno + FIRST_SSE_REG;
6678 /* Unnamed floating parameters are passed in both the
6679 SSE and integer registers. */
6680 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6681 t2 = gen_rtx_REG (mode, regno);
6682 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6683 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6684 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6687 /* Handle aggregate types passed in registers. */
6688 if (orig_mode == BLKmode)
6690 if (bytes > 0 && bytes <= 8)
6691 mode = (bytes > 4 ? DImode : SImode);
6692 if (mode == BLKmode)
6696 return gen_reg_or_parallel (mode, orig_mode, regno);
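/* For illustration (hypothetical declaration), under the MS x64
   convention handled here the four argument slots map to fixed
   registers regardless of type:

     void f (int a, double b, int c, double d);
     a -> %rcx, b -> %xmm1, c -> %r8, d -> %xmm3

   and, per the PARALLEL above, an unnamed floating-point argument
   is passed in both the SSE and the integer register of its slot.  */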
6699 /* Return where to put the arguments to a function.
6700 Return zero to push the argument on the stack, or a hard register in which to store the argument.
6702 MODE is the argument's machine mode. TYPE is the data type of the
6703 argument. It is null for libcalls where that information may not be
6704 available. CUM gives information about the preceding args and about
6705 the function being called. NAMED is nonzero if this argument is a
6706 named parameter (otherwise it is an extra parameter matching an
6710 ix86_function_arg (cumulative_args_t cum_v, enum machine_mode omode,
6711 const_tree type, bool named)
6713 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6714 enum machine_mode mode = omode;
6715 HOST_WIDE_INT bytes, words;
6718 if (mode == BLKmode)
6719 bytes = int_size_in_bytes (type);
6721 bytes = GET_MODE_SIZE (mode);
6722 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6724 /* To simplify the code below, represent vector types with a vector mode
6725 even if MMX/SSE are not active. */
6726 if (type && TREE_CODE (type) == VECTOR_TYPE)
6727 mode = type_natural_mode (type, cum);
6729 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6730 arg = function_arg_ms_64 (cum, mode, omode, named, bytes);
6731 else if (TARGET_64BIT)
6732 arg = function_arg_64 (cum, mode, omode, type, named);
6734 arg = function_arg_32 (cum, mode, omode, type, bytes, words);
6736 if (TARGET_VZEROUPPER && function_pass_avx256_p (arg))
6738 /* This argument uses 256bit AVX modes. */
6740 cfun->machine->callee_pass_avx256_p = true;
6742 cfun->machine->caller_pass_avx256_p = true;
6748 /* A C expression that indicates when an argument must be passed by
6749 reference. If nonzero for an argument, a copy of that argument is
6750 made in memory and a pointer to the argument is passed instead of
6751 the argument itself. The pointer is passed in whatever way is
6752 appropriate for passing a pointer to that type. */
6755 ix86_pass_by_reference (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
6756 enum machine_mode mode ATTRIBUTE_UNUSED,
6757 const_tree type, bool named ATTRIBUTE_UNUSED)
6759 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6761 /* See Windows x64 Software Convention. */
6762 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6764 int msize = (int) GET_MODE_SIZE (mode);
6767 /* Arrays are passed by reference. */
6768 if (TREE_CODE (type) == ARRAY_TYPE)
6771 if (AGGREGATE_TYPE_P (type))
6773 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6774 are passed by reference. */
6775 msize = int_size_in_bytes (type);
6779 /* __m128 is passed by reference. */
6781 case 1: case 2: case 4: case 8:
6787 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6793 /* Return true when TYPE should be 128bit aligned for 32bit argument
6794 passing ABI. XXX: This function is obsolete and is only used for
6795 checking psABI compatibility with previous versions of GCC. */
6798 ix86_compat_aligned_value_p (const_tree type)
6800 enum machine_mode mode = TYPE_MODE (type);
6801 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6805 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6807 if (TYPE_ALIGN (type) < 128)
6810 if (AGGREGATE_TYPE_P (type))
6812 /* Walk the aggregates recursively. */
6813 switch (TREE_CODE (type))
6817 case QUAL_UNION_TYPE:
6821 /* Walk all the structure fields. */
6822 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6824 if (TREE_CODE (field) == FIELD_DECL
6825 && ix86_compat_aligned_value_p (TREE_TYPE (field)))
6832 /* Just for use if some languages pass arrays by value. */
6833 if (ix86_compat_aligned_value_p (TREE_TYPE (type)))
6844 /* Return the alignment boundary for MODE and TYPE with alignment ALIGN.
6845 XXX: This function is obsolete and is only used for checking psABI
6846 compatibility with previous versions of GCC. */
6849 ix86_compat_function_arg_boundary (enum machine_mode mode,
6850 const_tree type, unsigned int align)
6852 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6853 natural boundaries. */
6854 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6856 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6857 make an exception for SSE modes since these require 128bit
6860 The handling here differs from field_alignment. ICC aligns MMX
6861 arguments to 4 byte boundaries, while structure fields are aligned
6862 to 8 byte boundaries. */
6865 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6866 align = PARM_BOUNDARY;
6870 if (!ix86_compat_aligned_value_p (type))
6871 align = PARM_BOUNDARY;
6874 if (align > BIGGEST_ALIGNMENT)
6875 align = BIGGEST_ALIGNMENT;
6879 /* Return true when TYPE should be 128bit aligned for 32bit argument
6883 ix86_contains_aligned_value_p (const_tree type)
6885 enum machine_mode mode = TYPE_MODE (type);
6887 if (mode == XFmode || mode == XCmode)
6890 if (TYPE_ALIGN (type) < 128)
6893 if (AGGREGATE_TYPE_P (type))
6895 /* Walk the aggregates recursively. */
6896 switch (TREE_CODE (type))
6900 case QUAL_UNION_TYPE:
6904 /* Walk all the structure fields. */
6905 for (field = TYPE_FIELDS (type);
6907 field = DECL_CHAIN (field))
6909 if (TREE_CODE (field) == FIELD_DECL
6910 && ix86_contains_aligned_value_p (TREE_TYPE (field)))
6917 /* Just for use if some languages pass arrays by value. */
6918 if (ix86_contains_aligned_value_p (TREE_TYPE (type)))
6927 return TYPE_ALIGN (type) >= 128;
6932 /* Gives the alignment boundary, in bits, of an argument with the
6933 specified mode and type. */
6936 ix86_function_arg_boundary (enum machine_mode mode, const_tree type)
6941 /* Since the main variant type is used for the call, convert the
6942 type to its main variant. */
6943 type = TYPE_MAIN_VARIANT (type);
6944 align = TYPE_ALIGN (type);
6947 align = GET_MODE_ALIGNMENT (mode);
6948 if (align < PARM_BOUNDARY)
6949 align = PARM_BOUNDARY;
6953 unsigned int saved_align = align;
6957 /* i386 ABI defines XFmode arguments to be 4 byte aligned. */
6960 if (mode == XFmode || mode == XCmode)
6961 align = PARM_BOUNDARY;
6963 else if (!ix86_contains_aligned_value_p (type))
6964 align = PARM_BOUNDARY;
6967 align = PARM_BOUNDARY;
6972 && align != ix86_compat_function_arg_boundary (mode, type,
6976 inform (input_location,
6977 "The ABI for passing parameters with %d-byte"
6978 " alignment has changed in GCC 4.6",
6979 align / BITS_PER_UNIT);
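/* For example: on x86-64 a plain int argument reports the 64-bit
   PARM_BOUNDARY and an __m128 argument a 128-bit boundary, while in
   32-bit mode most arguments stay at the 32-bit PARM_BOUNDARY
   unless the type contains a 128-bit-aligned value.  */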
6986 /* Return true if N is a possible register number of function value. */
6989 ix86_function_value_regno_p (const unsigned int regno)
6996 case FIRST_FLOAT_REG:
6997 /* TODO: The function should depend on current function ABI but
6998 builtins.c would need updating then. Therefore we use the
7000 if (TARGET_64BIT && ix86_abi == MS_ABI)
7002 return TARGET_FLOAT_RETURNS_IN_80387;
7008 if (TARGET_MACHO || TARGET_64BIT)
7016 /* Define how to find the value returned by a function.
7017 VALTYPE is the data type of the value (as a tree).
7018 If the precise function being called is known, FUNC is its FUNCTION_DECL;
7019 otherwise, FUNC is 0. */
7022 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
7023 const_tree fntype, const_tree fn)
7027 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
7028 we normally prevent this case when mmx is not available. However
7029 some ABIs may require the result to be returned like DImode. */
7030 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7031 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
7033 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
7034 we prevent this case when sse is not available. However some ABIs
7035 may require the result to be returned like integer TImode. */
7036 else if (mode == TImode
7037 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7038 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
7040 /* 32-byte vector modes in %ymm0. */
7041 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
7042 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
7044 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
7045 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
7046 regno = FIRST_FLOAT_REG;
7048 /* Most things go in %eax. */
7051 /* Override FP return register with %xmm0 for local functions when
7052 SSE math is enabled or for functions with sseregparm attribute. */
7053 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
7055 int sse_level = ix86_function_sseregparm (fntype, fn, false);
7056 if ((sse_level >= 1 && mode == SFmode)
7057 || (sse_level == 2 && mode == DFmode))
7058 regno = FIRST_SSE_REG;
7061 /* OImode shouldn't be used directly. */
7062 gcc_assert (mode != OImode);
7064 return gen_rtx_REG (orig_mode, regno);
7068 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
7073 /* Handle libcalls, which don't provide a type node. */
7074 if (valtype == NULL)
7086 return gen_rtx_REG (mode, FIRST_SSE_REG);
7089 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
7093 return gen_rtx_REG (mode, AX_REG);
7096 else if (POINTER_TYPE_P (valtype))
7098 /* Pointers are always returned in Pmode. */
7102 ret = construct_container (mode, orig_mode, valtype, 1,
7103 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
7104 x86_64_int_return_registers, 0);
7106 /* For zero-sized structures, construct_container returns NULL, but we
7107 need to keep the rest of the compiler happy by returning a meaningful value. */
7109 ret = gen_rtx_REG (orig_mode, AX_REG);
7115 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
7117 unsigned int regno = AX_REG;
7121 switch (GET_MODE_SIZE (mode))
7124 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7125 && !COMPLEX_MODE_P (mode))
7126 regno = FIRST_SSE_REG;
7130 if (mode == SFmode || mode == DFmode)
7131 regno = FIRST_SSE_REG;
7137 return gen_rtx_REG (orig_mode, regno);
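/* For example, under the MS x64 return convention above (and with
   SSE enabled):

     long long -> %rax    (size 8, integer)
     double    -> %xmm0   (size 8, floating point)
     __m128    -> %xmm0   (size 16, vector).  */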
7141 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
7142 enum machine_mode orig_mode, enum machine_mode mode)
7144 const_tree fn, fntype;
7147 if (fntype_or_decl && DECL_P (fntype_or_decl))
7148 fn = fntype_or_decl;
7149 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
7151 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
7152 return function_value_ms_64 (orig_mode, mode);
7153 else if (TARGET_64BIT)
7154 return function_value_64 (orig_mode, mode, valtype);
7156 return function_value_32 (orig_mode, mode, fntype, fn);
7160 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
7161 bool outgoing ATTRIBUTE_UNUSED)
7163 enum machine_mode mode, orig_mode;
7165 orig_mode = TYPE_MODE (valtype);
7166 mode = type_natural_mode (valtype, NULL);
7167 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
7170 /* Pointer function arguments and return values are promoted to Pmode. */
7172 static enum machine_mode
7173 ix86_promote_function_mode (const_tree type, enum machine_mode mode,
7174 int *punsignedp, const_tree fntype,
7177 if (type != NULL_TREE && POINTER_TYPE_P (type))
7179 *punsignedp = POINTERS_EXTEND_UNSIGNED;
7182 return default_promote_function_mode (type, mode, punsignedp, fntype,
7187 ix86_libcall_value (enum machine_mode mode)
7189 return ix86_function_value_1 (NULL, NULL, mode, mode);
7192 /* Return true iff type is returned in memory. */
7194 static bool ATTRIBUTE_UNUSED
7195 return_in_memory_32 (const_tree type, enum machine_mode mode)
7199 if (mode == BLKmode)
7202 size = int_size_in_bytes (type);
7204 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
7207 if (VECTOR_MODE_P (mode) || mode == TImode)
7209 /* User-created vectors small enough to fit in EAX. */
7213 /* MMX/3dNow values are returned in MM0,
7214 except when it doesn't exist or the ABI prescribes otherwise. */
7216 return !TARGET_MMX || TARGET_VECT8_RETURNS;
7218 /* SSE values are returned in XMM0, except when it doesn't exist. */
7222 /* AVX values are returned in YMM0, except when it doesn't exist. */
7233 /* OImode shouldn't be used directly. */
7234 gcc_assert (mode != OImode);
7239 static bool ATTRIBUTE_UNUSED
7240 return_in_memory_64 (const_tree type, enum machine_mode mode)
7242 int needed_intregs, needed_sseregs;
7243 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
7246 static bool ATTRIBUTE_UNUSED
7247 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
7249 HOST_WIDE_INT size = int_size_in_bytes (type);
7251 /* __m128 is returned in xmm0. */
7252 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
7253 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
7256 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
7257 return size != 1 && size != 2 && size != 4 && size != 8;
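/* E.g. a struct { char c[3]; } (size 3) is returned through a
   hidden pointer, while struct { char c[8]; } (size 8) comes back
   in %rax; only sizes 1, 2, 4 and 8, plus the 16-byte SSE case
   above, stay in registers.  */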
7261 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7263 #ifdef SUBTARGET_RETURN_IN_MEMORY
7264 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
7266 const enum machine_mode mode = type_natural_mode (type, NULL);
7270 if (ix86_function_type_abi (fntype) == MS_ABI)
7271 return return_in_memory_ms_64 (type, mode);
7273 return return_in_memory_64 (type, mode);
7276 return return_in_memory_32 (type, mode);
7280 /* When returning SSE vector types, we have a choice of either
7281 (1) being abi incompatible with a -march switch, or
7282 (2) generating an error.
7283 Given no good solution, I think the safest thing is one warning.
7284 The user won't be able to use -Werror, but....
7286 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
7287 called in response to actually generating a caller or callee that
7288 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
7289 via aggregate_value_p for general type probing from tree-ssa. */
7292 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
7294 static bool warnedsse, warnedmmx;
7296 if (!TARGET_64BIT && type)
7298 /* Look at the return type of the function, not the function type. */
7299 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
7301 if (!TARGET_SSE && !warnedsse)
7304 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
7307 warning (0, "SSE vector return without SSE enabled "
7312 if (!TARGET_MMX && !warnedmmx)
7314 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
7317 warning (0, "MMX vector return without MMX enabled "
7327 /* Create the va_list data type. */
7329 /* Returns the calling convention specific va_list data type.
7330 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
7333 ix86_build_builtin_va_list_abi (enum calling_abi abi)
7335 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
7337 /* For i386 we use a plain pointer to the argument area. */
7338 if (!TARGET_64BIT || abi == MS_ABI)
7339 return build_pointer_type (char_type_node);
7341 record = lang_hooks.types.make_type (RECORD_TYPE);
7342 type_decl = build_decl (BUILTINS_LOCATION,
7343 TYPE_DECL, get_identifier ("__va_list_tag"), record);
7345 f_gpr = build_decl (BUILTINS_LOCATION,
7346 FIELD_DECL, get_identifier ("gp_offset"),
7347 unsigned_type_node);
7348 f_fpr = build_decl (BUILTINS_LOCATION,
7349 FIELD_DECL, get_identifier ("fp_offset"),
7350 unsigned_type_node);
7351 f_ovf = build_decl (BUILTINS_LOCATION,
7352 FIELD_DECL, get_identifier ("overflow_arg_area"),
7354 f_sav = build_decl (BUILTINS_LOCATION,
7355 FIELD_DECL, get_identifier ("reg_save_area"),
7358 va_list_gpr_counter_field = f_gpr;
7359 va_list_fpr_counter_field = f_fpr;
7361 DECL_FIELD_CONTEXT (f_gpr) = record;
7362 DECL_FIELD_CONTEXT (f_fpr) = record;
7363 DECL_FIELD_CONTEXT (f_ovf) = record;
7364 DECL_FIELD_CONTEXT (f_sav) = record;
7366 TYPE_STUB_DECL (record) = type_decl;
7367 TYPE_NAME (record) = type_decl;
7368 TYPE_FIELDS (record) = f_gpr;
7369 DECL_CHAIN (f_gpr) = f_fpr;
7370 DECL_CHAIN (f_fpr) = f_ovf;
7371 DECL_CHAIN (f_ovf) = f_sav;
7373 layout_type (record);
7375 /* The correct type is an array type of one element. */
7376 return build_array_type (record, build_index_type (size_zero_node));
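/* The record built above corresponds to the SysV psABI declaration:

     typedef struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __builtin_va_list[1];  */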
7379 /* Set up the builtin va_list data type and, for 64-bit, the additional
7380 calling-convention-specific va_list data types. */
7383 ix86_build_builtin_va_list (void)
7385 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
7387 /* Initialize abi specific va_list builtin types. */
7391 if (ix86_abi == MS_ABI)
7393 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
7394 if (TREE_CODE (t) != RECORD_TYPE)
7395 t = build_variant_type_copy (t);
7396 sysv_va_list_type_node = t;
7401 if (TREE_CODE (t) != RECORD_TYPE)
7402 t = build_variant_type_copy (t);
7403 sysv_va_list_type_node = t;
7405 if (ix86_abi != MS_ABI)
7407 t = ix86_build_builtin_va_list_abi (MS_ABI);
7408 if (TREE_CODE (t) != RECORD_TYPE)
7409 t = build_variant_type_copy (t);
7410 ms_va_list_type_node = t;
7415 if (TREE_CODE (t) != RECORD_TYPE)
7416 t = build_variant_type_copy (t);
7417 ms_va_list_type_node = t;
7424 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
7427 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
7433 /* GPR size of varargs save area. */
7434 if (cfun->va_list_gpr_size)
7435 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
7437 ix86_varargs_gpr_size = 0;
7439 /* FPR size of varargs save area. We don't need it if we don't pass
7440 anything in SSE registers. */
7441 if (TARGET_SSE && cfun->va_list_fpr_size)
7442 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
7444 ix86_varargs_fpr_size = 0;
7446 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
7449 save_area = frame_pointer_rtx;
7450 set = get_varargs_alias_set ();
7452 max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
7453 if (max > X86_64_REGPARM_MAX)
7454 max = X86_64_REGPARM_MAX;
7456 for (i = cum->regno; i < max; i++)
7458 mem = gen_rtx_MEM (Pmode,
7459 plus_constant (save_area, i * UNITS_PER_WORD));
7460 MEM_NOTRAP_P (mem) = 1;
7461 set_mem_alias_set (mem, set);
7462 emit_move_insn (mem, gen_rtx_REG (Pmode,
7463 x86_64_int_parameter_registers[i]));
7466 if (ix86_varargs_fpr_size)
7468 enum machine_mode smode;
7471 /* Now emit code to save SSE registers. The AX parameter contains number
7472 of SSE parameter registers used to call this function, though all we
7473 actually check here is the zero/non-zero status. */
7475 label = gen_label_rtx ();
7476 test = gen_rtx_EQ (VOIDmode, gen_rtx_REG (QImode, AX_REG), const0_rtx);
7477 emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
7480 /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
7481 we used movdqa (i.e. TImode) instead? Perhaps even better would
7482 be if we could determine the real mode of the data, via a hook
7483 into pass_stdarg. Ignore all that for now. */
7485 if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
7486 crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);
7488 max = cum->sse_regno + cfun->va_list_fpr_size / 16;
7489 if (max > X86_64_SSE_REGPARM_MAX)
7490 max = X86_64_SSE_REGPARM_MAX;
7492 for (i = cum->sse_regno; i < max; ++i)
7494 mem = plus_constant (save_area, i * 16 + ix86_varargs_gpr_size);
7495 mem = gen_rtx_MEM (smode, mem);
7496 MEM_NOTRAP_P (mem) = 1;
7497 set_mem_alias_set (mem, set);
7498 set_mem_align (mem, GET_MODE_ALIGNMENT (smode));
7500 emit_move_insn (mem, gen_rtx_REG (smode, SSE_REGNO (i)));
7508 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
7510 alias_set_type set = get_varargs_alias_set ();
7513 /* Reset to zero, as there might be a sysv vaarg used
7515 ix86_varargs_gpr_size = 0;
7516 ix86_varargs_fpr_size = 0;
7518 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
7522 mem = gen_rtx_MEM (Pmode,
7523 plus_constant (virtual_incoming_args_rtx,
7524 i * UNITS_PER_WORD));
7525 MEM_NOTRAP_P (mem) = 1;
7526 set_mem_alias_set (mem, set);
7528 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
7529 emit_move_insn (mem, reg);
7534 ix86_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode,
7535 tree type, int *pretend_size ATTRIBUTE_UNUSED,
7538 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7539 CUMULATIVE_ARGS next_cum;
7542 /* This argument doesn't appear to be used anymore. Which is good,
7543 because the old code here didn't suppress rtl generation. */
7544 gcc_assert (!no_rtl);
7549 fntype = TREE_TYPE (current_function_decl);
7551 /* For varargs, we do not want to skip the dummy va_dcl argument.
7552 For stdargs, we do want to skip the last named argument. */
7554 if (stdarg_p (fntype))
7555 ix86_function_arg_advance (pack_cumulative_args (&next_cum), mode, type,
7558 if (cum->call_abi == MS_ABI)
7559 setup_incoming_varargs_ms_64 (&next_cum);
7561 setup_incoming_varargs_64 (&next_cum);
7564 /* Check whether TYPE is a va_list of the plain char * kind. */
7567 is_va_list_char_pointer (tree type)
7571 /* For 32-bit it is always true. */
7574 canonic = ix86_canonical_va_list_type (type);
7575 return (canonic == ms_va_list_type_node
7576 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
7579 /* Implement va_start. */
7582 ix86_va_start (tree valist, rtx nextarg)
7584 HOST_WIDE_INT words, n_gpr, n_fpr;
7585 tree f_gpr, f_fpr, f_ovf, f_sav;
7586 tree gpr, fpr, ovf, sav, t;
7590 if (flag_split_stack
7591 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7593 unsigned int scratch_regno;
7595 /* When we are splitting the stack, we can't refer to the stack
7596 arguments using internal_arg_pointer, because they may be on
7597 the old stack. The split stack prologue will arrange to
7598 leave a pointer to the old stack arguments in a scratch
7599 register, which we here copy to a pseudo-register. The split
7600 stack prologue can't set the pseudo-register directly because
7601 it (the prologue) runs before any registers have been saved. */
7603 scratch_regno = split_stack_prologue_scratch_regno ();
7604 if (scratch_regno != INVALID_REGNUM)
7608 reg = gen_reg_rtx (Pmode);
7609 cfun->machine->split_stack_varargs_pointer = reg;
7612 emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
7616 push_topmost_sequence ();
7617 emit_insn_after (seq, entry_of_function ());
7618 pop_topmost_sequence ();
7622 /* Only 64bit target needs something special. */
7623 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7625 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7626 std_expand_builtin_va_start (valist, nextarg);
7631 va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
7632 next = expand_binop (ptr_mode, add_optab,
7633 cfun->machine->split_stack_varargs_pointer,
7634 crtl->args.arg_offset_rtx,
7635 NULL_RTX, 0, OPTAB_LIB_WIDEN);
7636 convert_move (va_r, next, 0);
7641 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7642 f_fpr = DECL_CHAIN (f_gpr);
7643 f_ovf = DECL_CHAIN (f_fpr);
7644 f_sav = DECL_CHAIN (f_ovf);
7646 valist = build_simple_mem_ref (valist);
7647 TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
7648 /* The following should be folded into the MEM_REF offset. */
7649 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
7651 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
7653 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
7655 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
7658 /* Count number of gp and fp argument registers used. */
7659 words = crtl->args.info.words;
7660 n_gpr = crtl->args.info.regno;
7661 n_fpr = crtl->args.info.sse_regno;
7663 if (cfun->va_list_gpr_size)
7665 type = TREE_TYPE (gpr);
7666 t = build2 (MODIFY_EXPR, type,
7667 gpr, build_int_cst (type, n_gpr * 8));
7668 TREE_SIDE_EFFECTS (t) = 1;
7669 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7672 if (TARGET_SSE && cfun->va_list_fpr_size)
7674 type = TREE_TYPE (fpr);
7675 t = build2 (MODIFY_EXPR, type, fpr,
7676 build_int_cst (type, n_fpr * 16 + 8 * X86_64_REGPARM_MAX));
7677 TREE_SIDE_EFFECTS (t) = 1;
7678 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7681 /* Find the overflow area. */
7682 type = TREE_TYPE (ovf);
7683 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
7684 ovf_rtx = crtl->args.internal_arg_pointer;
7686 ovf_rtx = cfun->machine->split_stack_varargs_pointer;
7687 t = make_tree (type, ovf_rtx);
7689 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
7690 t = build2 (MODIFY_EXPR, type, ovf, t);
7691 TREE_SIDE_EFFECTS (t) = 1;
7692 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7694 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
7696 /* Find the register save area.
7697 The function prologue saves it right above the stack frame. */
7698 type = TREE_TYPE (sav);
7699 t = make_tree (type, frame_pointer_rtx);
7700 if (!ix86_varargs_gpr_size)
7701 t = fold_build_pointer_plus_hwi (t, -8 * X86_64_REGPARM_MAX);
7702 t = build2 (MODIFY_EXPR, type, sav, t);
7703 TREE_SIDE_EFFECTS (t) = 1;
7704 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
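/* A sketch of the resulting layout, assuming the full save area is
   needed: the six GP argument registers %rdi, %rsi, %rdx, %rcx,
   %r8 and %r9 sit at offsets 0..40 (8 bytes each), followed by the
   SSE argument registers from offset 48 on (16 bytes each);
   gp_offset and fp_offset index into this block, starting just past
   the registers already consumed by named arguments.  */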
7708 /* Implement va_arg. */
7711 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7714 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7715 tree f_gpr, f_fpr, f_ovf, f_sav;
7716 tree gpr, fpr, ovf, sav, t;
7718 tree lab_false, lab_over = NULL_TREE;
7723 enum machine_mode nat_mode;
7724 unsigned int arg_boundary;
7726 /* Only 64bit target needs something special. */
7727 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7728 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7730 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7731 f_fpr = DECL_CHAIN (f_gpr);
7732 f_ovf = DECL_CHAIN (f_fpr);
7733 f_sav = DECL_CHAIN (f_ovf);
7735 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7736 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7737 valist = build_va_arg_indirect_ref (valist);
7738 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7739 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7740 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7742 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7744 type = build_pointer_type (type);
7745 size = int_size_in_bytes (type);
7746 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7748 nat_mode = type_natural_mode (type, NULL);
7757 /* Unnamed 256bit vector mode parameters are passed on the stack. */
7758 if (!TARGET_64BIT_MS_ABI)
7765 container = construct_container (nat_mode, TYPE_MODE (type),
7766 type, 0, X86_64_REGPARM_MAX,
7767 X86_64_SSE_REGPARM_MAX, intreg,
7772 /* Pull the value out of the saved registers. */
7774 addr = create_tmp_var (ptr_type_node, "addr");
7778 int needed_intregs, needed_sseregs;
7780 tree int_addr, sse_addr;
7782 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7783 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7785 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7787 need_temp = (!REG_P (container)
7788 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7789 || TYPE_ALIGN (type) > 128));
7791 /* In case we are passing a structure, verify that it is a consecutive
7792 block on the register save area. If not, we need to do moves. */
7793 if (!need_temp && !REG_P (container))
7795 /* Verify that all registers are strictly consecutive. */
7796 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7800 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7802 rtx slot = XVECEXP (container, 0, i);
7803 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7804 || INTVAL (XEXP (slot, 1)) != i * 16)
7812 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7814 rtx slot = XVECEXP (container, 0, i);
7815 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7816 || INTVAL (XEXP (slot, 1)) != i * 8)
7828 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7829 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7832 /* First ensure that we fit completely in registers. */
7835 t = build_int_cst (TREE_TYPE (gpr),
7836 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7837 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7838 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7839 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7840 gimplify_and_add (t, pre_p);
7844 t = build_int_cst (TREE_TYPE (fpr),
7845 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7846 + X86_64_REGPARM_MAX * 8);
7847 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7848 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7849 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7850 gimplify_and_add (t, pre_p);
7853 /* Compute index to start of area used for integer regs. */
7856 /* int_addr = gpr + sav; */
7857 t = fold_build_pointer_plus (sav, gpr);
7858 gimplify_assign (int_addr, t, pre_p);
7862 /* sse_addr = fpr + sav; */
7863 t = fold_build_pointer_plus (sav, fpr);
7864 gimplify_assign (sse_addr, t, pre_p);
7868 int i, prev_size = 0;
7869 tree temp = create_tmp_var (type, "va_arg_tmp");
7872 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7873 gimplify_assign (addr, t, pre_p);
7875 for (i = 0; i < XVECLEN (container, 0); i++)
7877 rtx slot = XVECEXP (container, 0, i);
7878 rtx reg = XEXP (slot, 0);
7879 enum machine_mode mode = GET_MODE (reg);
7885 tree dest_addr, dest;
7886 int cur_size = GET_MODE_SIZE (mode);
7888 gcc_assert (prev_size <= INTVAL (XEXP (slot, 1)));
7889 prev_size = INTVAL (XEXP (slot, 1));
7890 if (prev_size + cur_size > size)
7892 cur_size = size - prev_size;
7893 mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
7894 if (mode == BLKmode)
7897 piece_type = lang_hooks.types.type_for_mode (mode, 1);
7898 if (mode == GET_MODE (reg))
7899 addr_type = build_pointer_type (piece_type);
7901 addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
7903 daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
7906 if (SSE_REGNO_P (REGNO (reg)))
7908 src_addr = sse_addr;
7909 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7913 src_addr = int_addr;
7914 src_offset = REGNO (reg) * 8;
7916 src_addr = fold_convert (addr_type, src_addr);
7917 src_addr = fold_build_pointer_plus_hwi (src_addr, src_offset);
7919 dest_addr = fold_convert (daddr_type, addr);
7920 dest_addr = fold_build_pointer_plus_hwi (dest_addr, prev_size);
7921 if (cur_size == GET_MODE_SIZE (mode))
7923 src = build_va_arg_indirect_ref (src_addr);
7924 dest = build_va_arg_indirect_ref (dest_addr);
7926 gimplify_assign (dest, src, pre_p);
7931 = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
7932 3, dest_addr, src_addr,
7933 size_int (cur_size));
7934 gimplify_and_add (copy, pre_p);
7936 prev_size += cur_size;
7942 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7943 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7944 gimplify_assign (gpr, t, pre_p);
7949 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7950 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7951 gimplify_assign (fpr, t, pre_p);
7954 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7956 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7959 /* ... otherwise out of the overflow area. */
7961 /* When we align a parameter on the stack for the caller, if its
7962 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
7963 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We match the callee
7964 with the caller here. */
7965 arg_boundary = ix86_function_arg_boundary (VOIDmode, type);
7966 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7967 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7969 /* Care for on-stack alignment if needed. */
7970 if (arg_boundary <= 64 || size == 0)
7974 HOST_WIDE_INT align = arg_boundary / 8;
7975 t = fold_build_pointer_plus_hwi (ovf, align - 1);
7976 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7977 build_int_cst (TREE_TYPE (t), -align));
7980 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7981 gimplify_assign (addr, t, pre_p);
7983 t = fold_build_pointer_plus_hwi (t, rsize * UNITS_PER_WORD);
7984 gimplify_assign (unshare_expr (ovf), t, pre_p);
7987 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7989 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7990 addr = fold_convert (ptrtype, addr);
7993 addr = build_va_arg_indirect_ref (addr);
7994 return build_va_arg_indirect_ref (addr);
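/* As a sketch, for a plain int argument the GIMPLE emitted above is
   roughly equivalent to:

     if (ap->gp_offset >= 48) goto stack;
     addr = ap->reg_save_area + ap->gp_offset;
     ap->gp_offset += 8;
     goto done;
   stack:
     addr = ap->overflow_arg_area;
     ap->overflow_arg_area += 8;
   done:
     result = *(int *) addr;  */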
7997 /* Return true if OPNUM's MEM should be matched
7998 in movabs* patterns. */
8001 ix86_check_movabs (rtx insn, int opnum)
8005 set = PATTERN (insn);
8006 if (GET_CODE (set) == PARALLEL)
8007 set = XVECEXP (set, 0, 0);
8008 gcc_assert (GET_CODE (set) == SET);
8009 mem = XEXP (set, opnum);
8010 while (GET_CODE (mem) == SUBREG)
8011 mem = SUBREG_REG (mem);
8012 gcc_assert (MEM_P (mem));
8013 return volatile_ok || !MEM_VOLATILE_P (mem);
8016 /* Initialize the table of extra 80387 mathematical constants. */
8019 init_ext_80387_constants (void)
8021 static const char * cst[5] =
8023 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
8024 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
8025 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
8026 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
8027 "3.1415926535897932385128089594061862044", /* 4: fldpi */
8031 for (i = 0; i < 5; i++)
8033 real_from_string (&ext_80387_constants_table[i], cst[i]);
8034 /* Ensure each constant is rounded to XFmode precision. */
8035 real_convert (&ext_80387_constants_table[i],
8036 XFmode, &ext_80387_constants_table[i]);
8039 ext_80387_constants_init = 1;
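/* The five constants above are, in order, log10(2), ln(2), log2(e),
   log2(10) and pi: the values loaded by the fldlg2, fldln2, fldl2e,
   fldl2t and fldpi instructions.  */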
8042 /* Return non-zero if the constant is something that
8043 can be loaded with a special instruction. */
8046 standard_80387_constant_p (rtx x)
8048 enum machine_mode mode = GET_MODE (x);
8052 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
8055 if (x == CONST0_RTX (mode))
8057 if (x == CONST1_RTX (mode))
8060 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8062 /* For XFmode constants, try to find a special 80387 instruction when
8063 optimizing for size or on those CPUs that benefit from them. */
8065 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
8069 if (! ext_80387_constants_init)
8070 init_ext_80387_constants ();
8072 for (i = 0; i < 5; i++)
8073 if (real_identical (&r, &ext_80387_constants_table[i]))
8077 /* Load of the constant -0.0 or -1.0 will be split as
8078 fldz;fchs or fld1;fchs sequence. */
8079 if (real_isnegzero (&r))
8081 if (real_identical (&r, &dconstm1))
8087 /* Return the opcode of the special instruction to be used to load
8091 standard_80387_constant_opcode (rtx x)
8093 switch (standard_80387_constant_p (x))
8117 /* Return the CONST_DOUBLE representing the 80387 constant that is
8118 loaded by the specified special instruction. The argument IDX
8119 matches the return value from standard_80387_constant_p. */
8122 standard_80387_constant_rtx (int idx)
8126 if (! ext_80387_constants_init)
8127 init_ext_80387_constants ();
8143 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
8147 /* Return 1 if X is all zeros and 2 if X is all ones,
8148 in a supported SSE vector mode. */
8151 standard_sse_constant_p (rtx x)
8153 enum machine_mode mode = GET_MODE (x);
8155 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
8157 if (vector_all_ones_operand (x, mode))
8173 /* Return the opcode of the special instruction to be used to load the constant X. */
8177 standard_sse_constant_opcode (rtx insn, rtx x)
8179 switch (standard_sse_constant_p (x))
8182 switch (get_attr_mode (insn))
8185 if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8186 return "%vpxor\t%0, %d0";
8188 if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8189 return "%vxorpd\t%0, %d0";
8191 return "%vxorps\t%0, %d0";
8194 if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8195 return "vpxor\t%x0, %x0, %x0";
8197 if (!TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
8198 return "vxorpd\t%x0, %x0, %x0";
8200 return "vxorps\t%x0, %x0, %x0";
8207 return "%vpcmpeqd\t%0, %d0";
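/* Side note (an observation, not an authoritative statement of the output
   machinery): the "%v" prefix in the templates above expands to "v" when
   AVX is enabled and to nothing otherwise, and "%d0" duplicates the
   destination operand for the AVX three-operand form.  So a single
   template such as "%vpxor\t%0, %d0" comes out roughly as:

	pxor	%xmm0, %xmm0		# SSE
	vpxor	%xmm0, %xmm0, %xmm0	# AVX
*/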
8214 /* Returns true if OP contains a symbol reference. */
8217 symbolic_reference_mentioned_p (rtx op)
8222 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
8225 fmt = GET_RTX_FORMAT (GET_CODE (op));
8226 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
8232 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
8233 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
8237 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
8244 /* Return true if it is appropriate to emit `ret' instructions in the
8245 body of a function. Do this only if the epilogue is simple, needing a
8246 couple of insns. Prior to reloading, we can't tell how many registers
8247 must be saved, so return false then. Return false if there is no frame
8248 marker to de-allocate. */
8251 ix86_can_use_return_insn_p (void)
8253 struct ix86_frame frame;
8255 if (! reload_completed || frame_pointer_needed)
8258 /* Don't allow more than 32k pop, since that's all we can do
8259 with one instruction. */
8260 if (crtl->args.pops_args && crtl->args.size >= 32768)
8263 ix86_compute_frame_layout (&frame);
8264 return (frame.stack_pointer_offset == UNITS_PER_WORD
8265 && (frame.nregs + frame.nsseregs) == 0);
8268 /* Value should be nonzero if functions must have frame pointers.
8269 Zero means the frame pointer need not be set up (and parms may
8270 be accessed via the stack pointer) in functions that seem suitable. */
8273 ix86_frame_pointer_required (void)
8275 /* If we accessed previous frames, then the generated code expects
8276 to be able to access the saved ebp value in our frame. */
8277 if (cfun->machine->accesses_prev_frame)
8280 /* Several x86 OSes need a frame pointer for other reasons,
8281 usually pertaining to setjmp. */
8282 if (SUBTARGET_FRAME_POINTER_REQUIRED)
8285 /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
8286 turns off the frame pointer by default. Turn it back on now if
8287 we've not got a leaf function. */
8288 if (TARGET_OMIT_LEAF_FRAME_POINTER
8289 && (!current_function_is_leaf
8290 || ix86_current_function_calls_tls_descriptor))
8293 if (crtl->profile && !flag_fentry)
8299 /* Record that the current function accesses previous call frames. */
8302 ix86_setup_frame_addresses (void)
8304 cfun->machine->accesses_prev_frame = 1;
8307 #ifndef USE_HIDDEN_LINKONCE
8308 # if defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)
8309 # define USE_HIDDEN_LINKONCE 1
8311 # define USE_HIDDEN_LINKONCE 0
8315 static int pic_labels_used;
8317 /* Fills in the label name that should be used for a pc thunk for
8318 the given register. */
8321 get_pc_thunk_name (char name[32], unsigned int regno)
8323 gcc_assert (!TARGET_64BIT);
8325 if (USE_HIDDEN_LINKONCE)
8326 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
8328 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
8332 /* This function generates code for -fpic that loads %ebx with
8333 the return address of the caller and then returns. */
8336 ix86_code_end (void)
8341 for (regno = AX_REG; regno <= SP_REG; regno++)
8346 if (!(pic_labels_used & (1 << regno)))
8349 get_pc_thunk_name (name, regno);
8351 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
8352 get_identifier (name),
8353 build_function_type_list (void_type_node, NULL_TREE));
8354 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
8355 NULL_TREE, void_type_node);
8356 TREE_PUBLIC (decl) = 1;
8357 TREE_STATIC (decl) = 1;
8362 switch_to_section (darwin_sections[text_coal_section]);
8363 fputs ("\t.weak_definition\t", asm_out_file);
8364 assemble_name (asm_out_file, name);
8365 fputs ("\n\t.private_extern\t", asm_out_file);
8366 assemble_name (asm_out_file, name);
8367 putc ('\n', asm_out_file);
8368 ASM_OUTPUT_LABEL (asm_out_file, name);
8369 DECL_WEAK (decl) = 1;
8373 if (USE_HIDDEN_LINKONCE)
8375 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
8377 targetm.asm_out.unique_section (decl, 0);
8378 switch_to_section (get_named_section (decl, NULL, 0));
8380 targetm.asm_out.globalize_label (asm_out_file, name);
8381 fputs ("\t.hidden\t", asm_out_file);
8382 assemble_name (asm_out_file, name);
8383 putc ('\n', asm_out_file);
8384 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
8388 switch_to_section (text_section);
8389 ASM_OUTPUT_LABEL (asm_out_file, name);
8392 DECL_INITIAL (decl) = make_node (BLOCK);
8393 current_function_decl = decl;
8394 init_function_start (decl);
8395 first_function_block_is_cold = false;
8396 /* Make sure unwind info is emitted for the thunk if needed. */
8397 final_start_function (emit_barrier (), asm_out_file, 1);
8399 /* Pad stack IP move with 4 instructions (two NOPs count
8400 as one instruction). */
8401 if (TARGET_PAD_SHORT_FUNCTION)
8406 fputs ("\tnop\n", asm_out_file);
8409 xops[0] = gen_rtx_REG (Pmode, regno);
8410 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8411 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
8412 fputs ("\tret\n", asm_out_file);
8413 final_end_function ();
8414 init_insn_lengths ();
8415 free_after_compilation (cfun);
8417 current_function_decl = NULL;
8420 if (flag_split_stack)
8421 file_end_indicate_split_stack ();
8424 /* Emit code for the SET_GOT patterns. */
8427 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
8433 if (TARGET_VXWORKS_RTP && flag_pic)
8435 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
8436 xops[2] = gen_rtx_MEM (Pmode,
8437 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
8438 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
8440 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
8441 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
8442 an unadorned address. */
8443 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
8444 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
8445 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
8449 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
8453 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
8455 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
8458 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8459 is what will be referenced by the Mach-O PIC subsystem. */
8461 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8464 targetm.asm_out.internal_label (asm_out_file, "L",
8465 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
8470 get_pc_thunk_name (name, REGNO (dest));
8471 pic_labels_used |= 1 << REGNO (dest);
8473 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
8474 xops[2] = gen_rtx_MEM (QImode, xops[2]);
8475 output_asm_insn ("call\t%X2", xops);
8476 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
8477 is what will be referenced by the Mach-O PIC subsystem. */
8480 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
8482 targetm.asm_out.internal_label (asm_out_file, "L",
8483 CODE_LABEL_NUMBER (label));
8488 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
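/* For orientation (illustrative, abbreviated): on 32-bit ELF targets the
   thunk path above emits a sequence along the lines of

	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   where the thunk body, produced by ix86_code_end above, is simply

   __i686.get_pc_thunk.bx:
	movl	(%esp), %ebx
	ret
*/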
8493 /* Generate a "push" pattern for input ARG. */
8498 struct machine_function *m = cfun->machine;
8500 if (m->fs.cfa_reg == stack_pointer_rtx)
8501 m->fs.cfa_offset += UNITS_PER_WORD;
8502 m->fs.sp_offset += UNITS_PER_WORD;
8504 return gen_rtx_SET (VOIDmode,
8506 gen_rtx_PRE_DEC (Pmode,
8507 stack_pointer_rtx)),
8511 /* Generate a "pop" pattern for input ARG. */
8516 return gen_rtx_SET (VOIDmode,
8519 gen_rtx_POST_INC (Pmode,
8520 stack_pointer_rtx)));
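/* For reference, a sketch of the RTL the two helpers above construct,
   assuming 32-bit Pmode:

     gen_push (r)  ->  (set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI r))
     gen_pop (r)   ->  (set (reg:SI r) (mem:SI (post_inc:SI (reg:SI sp))))

   The pre_dec/post_inc side effects are what let these match the
   machine's single-instruction push and pop forms.  */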
8523 /* Return >= 0 if there is an unused call-clobbered register available
8524 for the entire function. */
8527 ix86_select_alt_pic_regnum (void)
8529 if (current_function_is_leaf
8531 && !ix86_current_function_calls_tls_descriptor)
8534 /* Can't use the same register for both PIC and DRAP. */
8536 drap = REGNO (crtl->drap_reg);
8539 for (i = 2; i >= 0; --i)
8540 if (i != drap && !df_regs_ever_live_p (i))
8544 return INVALID_REGNUM;
8547 /* Return TRUE if we need to save REGNO. */
8550 ix86_save_reg (unsigned int regno, bool maybe_eh_return)
8552 if (pic_offset_table_rtx
8553 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
8554 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8556 || crtl->calls_eh_return
8557 || crtl->uses_const_pool))
8558 return ix86_select_alt_pic_regnum () == INVALID_REGNUM;
8560 if (crtl->calls_eh_return && maybe_eh_return)
8565 unsigned test = EH_RETURN_DATA_REGNO (i);
8566 if (test == INVALID_REGNUM)
8573 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
8576 return (df_regs_ever_live_p (regno)
8577 && !call_used_regs[regno]
8578 && !fixed_regs[regno]
8579 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
8582 /* Return the number of saved general-purpose registers. */
8585 ix86_nsaved_regs (void)
8590 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8591 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8596 /* Return the number of saved SSE registers. */
8599 ix86_nsaved_sseregs (void)
8604 if (!TARGET_64BIT_MS_ABI)
8606 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8607 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8612 /* Given FROM and TO register numbers, say whether this elimination is
8613 allowed. If stack alignment is needed, we can only replace argument
8614 pointer with hard frame pointer, or replace frame pointer with stack
8615 pointer. Otherwise, frame pointer elimination is automatically
8616 handled and all other eliminations are valid. */
8619 ix86_can_eliminate (const int from, const int to)
8621 if (stack_realign_fp)
8622 return ((from == ARG_POINTER_REGNUM
8623 && to == HARD_FRAME_POINTER_REGNUM)
8624 || (from == FRAME_POINTER_REGNUM
8625 && to == STACK_POINTER_REGNUM));
8627 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
8630 /* Return the offset between two registers, one to be eliminated, and the other
8631 its replacement, at the start of a routine. */
8634 ix86_initial_elimination_offset (int from, int to)
8636 struct ix86_frame frame;
8637 ix86_compute_frame_layout (&frame);
8639 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8640 return frame.hard_frame_pointer_offset;
8641 else if (from == FRAME_POINTER_REGNUM
8642 && to == HARD_FRAME_POINTER_REGNUM)
8643 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
8646 gcc_assert (to == STACK_POINTER_REGNUM);
8648 if (from == ARG_POINTER_REGNUM)
8649 return frame.stack_pointer_offset;
8651 gcc_assert (from == FRAME_POINTER_REGNUM);
8652 return frame.stack_pointer_offset - frame.frame_pointer_offset;
8656 /* In a dynamically-aligned function, we can't know the offset from
8657 stack pointer to frame pointer, so we must ensure that setjmp
8658 eliminates fp against the hard fp (%ebp) rather than trying to
8659 index from %esp up to the top of the frame across a gap that is
8660 of unknown (at compile-time) size. */
8662 ix86_builtin_setjmp_frame_value (void)
8664 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
8667 /* When using -fsplit-stack, the allocation routines set a field in
9668 the TCB to the bottom of the stack plus this much space, measured in bytes. */
8671 #define SPLIT_STACK_AVAILABLE 256
8673 /* Fill the ix86_frame structure describing the frame of the currently compiled function. */
8676 ix86_compute_frame_layout (struct ix86_frame *frame)
8678 unsigned int stack_alignment_needed;
8679 HOST_WIDE_INT offset;
8680 unsigned int preferred_alignment;
8681 HOST_WIDE_INT size = get_frame_size ();
8682 HOST_WIDE_INT to_allocate;
8684 frame->nregs = ix86_nsaved_regs ();
8685 frame->nsseregs = ix86_nsaved_sseregs ();
8687 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
8688 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
8690 /* The 64-bit MS ABI seems to require stack alignment to always be 16,
8691 except in function prologues and in leaf functions. */
8692 if ((TARGET_64BIT_MS_ABI && preferred_alignment < 16)
8693 && (!current_function_is_leaf || cfun->calls_alloca != 0
8694 || ix86_current_function_calls_tls_descriptor))
8696 preferred_alignment = 16;
8697 stack_alignment_needed = 16;
8698 crtl->preferred_stack_boundary = 128;
8699 crtl->stack_alignment_needed = 128;
8702 gcc_assert (!size || stack_alignment_needed);
8703 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8704 gcc_assert (preferred_alignment <= stack_alignment_needed);
8706 /* For SEH we have to limit the amount of code movement into the prologue.
8707 At present we do this via a BLOCKAGE, at which point there's very little
8708 scheduling that can be done, which means that there's very little point
8709 in doing anything except PUSHs. */
8711 cfun->machine->use_fast_prologue_epilogue = false;
8713 /* During reload iterations the number of saved registers can change.
8714 Recompute the value as needed. Do not recompute when the number of
8715 registers didn't change, as reload does multiple calls to the function
8716 and does not expect the decision to change within a single iteration. */
8717 else if (!optimize_function_for_size_p (cfun)
8718 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8720 int count = frame->nregs;
8721 struct cgraph_node *node = cgraph_get_node (current_function_decl);
8723 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8725 /* The fast prologue uses move instead of push to save registers. This
8726 is significantly longer, but also executes faster as modern hardware
8727 can execute the moves in parallel, but can't do that for push/pop.
8729 Be careful about choosing which prologue to emit: when the function
8730 takes many instructions to execute, we may as well use the slow
8731 version; likewise when the function is known to be outside a hot spot
8732 (known only with profile feedback). Weight the size of the function
8733 by the number of registers to save, as it is cheap to use one or two
8734 push instructions but very slow to use many of them. */
8736 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8737 if (node->frequency < NODE_FREQUENCY_NORMAL
8738 || (flag_branch_probabilities
8739 && node->frequency < NODE_FREQUENCY_HOT))
8740 cfun->machine->use_fast_prologue_epilogue = false;
8742 cfun->machine->use_fast_prologue_epilogue
8743 = !expensive_function_p (count);
8745 if (TARGET_PROLOGUE_USING_MOVE
8746 && cfun->machine->use_fast_prologue_epilogue)
8747 frame->save_regs_using_mov = true;
8749 frame->save_regs_using_mov = false;
8751 /* If static stack checking is enabled and done with probes, the registers
8752 need to be saved before allocating the frame. */
8753 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
8754 frame->save_regs_using_mov = false;
8756 /* Skip return address. */
8757 offset = UNITS_PER_WORD;
8759 /* Skip pushed static chain. */
8760 if (ix86_static_chain_on_stack)
8761 offset += UNITS_PER_WORD;
8763 /* Skip saved base pointer. */
8764 if (frame_pointer_needed)
8765 offset += UNITS_PER_WORD;
8766 frame->hfp_save_offset = offset;
8768 /* The traditional frame pointer location is at the top of the frame. */
8769 frame->hard_frame_pointer_offset = offset;
8771 /* Register save area */
8772 offset += frame->nregs * UNITS_PER_WORD;
8773 frame->reg_save_offset = offset;
8775 /* Align and set SSE register save area. */
8776 if (frame->nsseregs)
8778 /* The only ABI that has saved SSE registers (Win64) also has a
8779 16-byte aligned default stack, and thus we don't need to be
8780 within the re-aligned local stack frame to save them. */
8781 gcc_assert (INCOMING_STACK_BOUNDARY >= 128);
8782 offset = (offset + 16 - 1) & -16;
8783 offset += frame->nsseregs * 16;
8785 frame->sse_reg_save_offset = offset;
8787 /* The re-aligned stack starts here. Values before this point are not
8788 directly comparable with values below this point. In order to make
8789 sure that no value happens to be the same before and after, force
8790 the alignment computation below to add a non-zero value. */
8791 if (stack_realign_fp)
8792 offset = (offset + stack_alignment_needed) & -stack_alignment_needed;
8795 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8796 offset += frame->va_arg_size;
8798 /* Align start of frame for local function. */
8799 if (stack_realign_fp
8800 || offset != frame->sse_reg_save_offset
8802 || !current_function_is_leaf
8803 || cfun->calls_alloca
8804 || ix86_current_function_calls_tls_descriptor)
8805 offset = (offset + stack_alignment_needed - 1) & -stack_alignment_needed;
8807 /* Frame pointer points here. */
8808 frame->frame_pointer_offset = offset;
8812 /* Add outgoing arguments area. Can be skipped if we eliminated
8813 all the function calls as dead code.
8814 Skipping is, however, impossible when the function calls alloca:
8815 the alloca expander assumes that the last crtl->outgoing_args_size
8816 bytes of the stack frame are unused. */
8817 if (ACCUMULATE_OUTGOING_ARGS
8818 && (!current_function_is_leaf || cfun->calls_alloca
8819 || ix86_current_function_calls_tls_descriptor))
8821 offset += crtl->outgoing_args_size;
8822 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8825 frame->outgoing_arguments_size = 0;
8827 /* Align the stack boundary. Only needed if we're calling another function or using alloca. */
8829 if (!current_function_is_leaf || cfun->calls_alloca
8830 || ix86_current_function_calls_tls_descriptor)
8831 offset = (offset + preferred_alignment - 1) & -preferred_alignment;
8833 /* We've reached end of stack frame. */
8834 frame->stack_pointer_offset = offset;
8836 /* Size prologue needs to allocate. */
8837 to_allocate = offset - frame->sse_reg_save_offset;
8839 if ((!to_allocate && frame->nregs <= 1)
8840 || (TARGET_64BIT && to_allocate >= (HOST_WIDE_INT) 0x80000000))
8841 frame->save_regs_using_mov = false;
8843 if (ix86_using_red_zone ()
8844 && current_function_sp_is_unchanging
8845 && current_function_is_leaf
8846 && !ix86_current_function_calls_tls_descriptor)
8848 frame->red_zone_size = to_allocate;
8849 if (frame->save_regs_using_mov)
8850 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8851 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8852 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8855 frame->red_zone_size = 0;
8856 frame->stack_pointer_offset -= frame->red_zone_size;
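/* Background, stated loosely: the x86-64 red zone is the 128-byte area
   below the stack pointer that leaf code may use without adjusting
   %rsp, e.g.

	movq	%rdi, -8(%rsp)	# store into the red zone, no sub/add of %rsp

   which is why the allocation above can be folded into it when the
   function is a leaf and the stack pointer is unchanging.  */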
8858 /* The SEH frame pointer location is near the bottom of the frame.
8859 This is enforced by the fact that the difference between the
8860 stack pointer and the frame pointer is limited to 240 bytes in
8861 the unwind data structure. */
8866 /* If we can leave the frame pointer where it is, do so. */
8867 diff = frame->stack_pointer_offset - frame->hard_frame_pointer_offset;
8868 if (diff > 240 || (diff & 15) != 0)
8870 /* Ideally we'd determine what portion of the local stack frame
8871 (within the constraint of the lowest 240) is most heavily used.
8872 But without that complication, simply bias the frame pointer
8873 by 128 bytes so as to maximize the amount of the local stack
8874 frame that is addressable with 8-bit offsets. */
8875 frame->hard_frame_pointer_offset = frame->stack_pointer_offset - 128;
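/* Recap of the layout computed above (illustrative; bracketed areas are
   optional, offsets are measured from the CFA):

	return address			<- UNITS_PER_WORD
	[pushed static chain]
	[saved frame pointer]		<- hard_frame_pointer_offset
	integer register save area	<- reg_save_offset
	[16-byte-aligned SSE save area]	<- sse_reg_save_offset
	[va_arg register save area]
	local variables			<- frame_pointer_offset
	[outgoing arguments]
					<- stack_pointer_offset
*/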
8880 /* This is semi-inlined memory_address_length, but simplified
8881 since we know that we're always dealing with reg+offset, and
8882 to avoid having to create and discard all that rtl. */
8885 choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
8891 /* EBP and R13 cannot be encoded without an offset. */
8892 len = (regno == BP_REG || regno == R13_REG);
8894 else if (IN_RANGE (offset, -128, 127))
8897 /* ESP and R12 must be encoded with a SIB byte. */
8898 if (regno == SP_REG || regno == R12_REG)
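/* Worked examples of the length computed above (illustrative, counting
   only the bytes beyond the opcode and ModRM):

     (%eax),  offset 0	-> 0	no displacement needed
     (%ebp),  offset 0	-> 1	EBP/R13 require an explicit disp8 of 0
     8(%eax)		-> 1	disp8, offset within [-128, 127]
     1024(%eax)		-> 4	disp32
     (%esp),  offset 0	-> 1	ESP/R12 additionally require a SIB byte
*/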
8904 /* Return an RTX that points to CFA_OFFSET within the stack frame.
8905 The valid base registers are taken from CFUN->MACHINE->FS. */
8908 choose_baseaddr (HOST_WIDE_INT cfa_offset)
8910 const struct machine_function *m = cfun->machine;
8911 rtx base_reg = NULL;
8912 HOST_WIDE_INT base_offset = 0;
8914 if (m->use_fast_prologue_epilogue)
8916 /* Choose the base register most likely to allow the most scheduling
8917 opportunities. Generally FP is valid throughout the function,
8918 while DRAP must be reloaded within the epilogue. But choose either
8919 over the SP due to increased encoding size. */
8923 base_reg = hard_frame_pointer_rtx;
8924 base_offset = m->fs.fp_offset - cfa_offset;
8926 else if (m->fs.drap_valid)
8928 base_reg = crtl->drap_reg;
8929 base_offset = 0 - cfa_offset;
8931 else if (m->fs.sp_valid)
8933 base_reg = stack_pointer_rtx;
8934 base_offset = m->fs.sp_offset - cfa_offset;
8939 HOST_WIDE_INT toffset;
8942 /* Choose the base register with the smallest address encoding.
8943 With a tie, choose FP > DRAP > SP. */
8946 base_reg = stack_pointer_rtx;
8947 base_offset = m->fs.sp_offset - cfa_offset;
8948 len = choose_baseaddr_len (STACK_POINTER_REGNUM, base_offset);
8950 if (m->fs.drap_valid)
8952 toffset = 0 - cfa_offset;
8953 tlen = choose_baseaddr_len (REGNO (crtl->drap_reg), toffset);
8956 base_reg = crtl->drap_reg;
8957 base_offset = toffset;
8963 toffset = m->fs.fp_offset - cfa_offset;
8964 tlen = choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM, toffset);
8967 base_reg = hard_frame_pointer_rtx;
8968 base_offset = toffset;
8973 gcc_assert (base_reg != NULL);
8975 return plus_constant (base_reg, base_offset);
8978 /* Emit code to save registers in the prologue. */
8981 ix86_emit_save_regs (void)
8986 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8987 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8989 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8990 RTX_FRAME_RELATED_P (insn) = 1;
8994 /* Emit a single register save at CFA - CFA_OFFSET. */
8997 ix86_emit_save_reg_using_mov (enum machine_mode mode, unsigned int regno,
8998 HOST_WIDE_INT cfa_offset)
9000 struct machine_function *m = cfun->machine;
9001 rtx reg = gen_rtx_REG (mode, regno);
9002 rtx mem, addr, base, insn;
9004 addr = choose_baseaddr (cfa_offset);
9005 mem = gen_frame_mem (mode, addr);
9007 /* For SSE saves, we need to indicate the 128-bit alignment. */
9008 set_mem_align (mem, GET_MODE_ALIGNMENT (mode));
9010 insn = emit_move_insn (mem, reg);
9011 RTX_FRAME_RELATED_P (insn) = 1;
9014 if (GET_CODE (base) == PLUS)
9015 base = XEXP (base, 0);
9016 gcc_checking_assert (REG_P (base));
9018 /* When saving registers into a re-aligned local stack frame, avoid
9019 any tricky guessing by dwarf2out. */
9020 if (m->fs.realigned)
9022 gcc_checking_assert (stack_realign_drap);
9024 if (regno == REGNO (crtl->drap_reg))
9026 /* A bit of a hack. We force the DRAP register to be saved in
9027 the re-aligned stack frame, which provides us with a copy
9028 of the CFA that will last past the prologue. Install it. */
9029 gcc_checking_assert (cfun->machine->fs.fp_valid);
9030 addr = plus_constant (hard_frame_pointer_rtx,
9031 cfun->machine->fs.fp_offset - cfa_offset);
9032 mem = gen_rtx_MEM (mode, addr);
9033 add_reg_note (insn, REG_CFA_DEF_CFA, mem);
9037 /* The frame pointer is a stable reference within the
9038 aligned frame. Use it. */
9039 gcc_checking_assert (cfun->machine->fs.fp_valid);
9040 addr = plus_constant (hard_frame_pointer_rtx,
9041 cfun->machine->fs.fp_offset - cfa_offset);
9042 mem = gen_rtx_MEM (mode, addr);
9043 add_reg_note (insn, REG_CFA_EXPRESSION,
9044 gen_rtx_SET (VOIDmode, mem, reg));
9048 /* The memory may not be relative to the current CFA register,
9049 which means that we may need to generate a new pattern for
9050 use by the unwind info. */
9051 else if (base != m->fs.cfa_reg)
9053 addr = plus_constant (m->fs.cfa_reg, m->fs.cfa_offset - cfa_offset);
9054 mem = gen_rtx_MEM (mode, addr);
9055 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, mem, reg));
9059 /* Emit code to save registers using MOV insns.
9060 First register is stored at CFA - CFA_OFFSET. */
9062 ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
9066 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9067 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9069 ix86_emit_save_reg_using_mov (Pmode, regno, cfa_offset);
9070 cfa_offset -= UNITS_PER_WORD;
9074 /* Emit code to save SSE registers using MOV insns.
9075 First register is stored at CFA - CFA_OFFSET. */
9077 ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
9081 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9082 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
9084 ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
9089 static GTY(()) rtx queued_cfa_restores;
9091 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next
9092 stack manipulation insn. The value is on the stack at CFA - CFA_OFFSET.
9093 Don't add the note if the previously saved value will be left untouched
9094 within the stack red zone until return, as unwinders can find the same
9095 value in the register and on the stack. */
9098 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT cfa_offset)
9100 if (cfa_offset <= cfun->machine->fs.red_zone_offset)
9105 add_reg_note (insn, REG_CFA_RESTORE, reg);
9106 RTX_FRAME_RELATED_P (insn) = 1;
9110 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
9113 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
9116 ix86_add_queued_cfa_restore_notes (rtx insn)
9119 if (!queued_cfa_restores)
9121 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
9123 XEXP (last, 1) = REG_NOTES (insn);
9124 REG_NOTES (insn) = queued_cfa_restores;
9125 queued_cfa_restores = NULL_RTX;
9126 RTX_FRAME_RELATED_P (insn) = 1;
9129 /* Expand prologue or epilogue stack adjustment.
9130 The pattern exists to put a dependency on all ebp-based memory accesses.
9131 STYLE should be negative if instructions should be marked as frame
9132 related, zero if the %r11 register is live and cannot be freely used, and positive otherwise. */
9136 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
9137 int style, bool set_cfa)
9139 struct machine_function *m = cfun->machine;
9141 bool add_frame_related_expr = false;
9144 insn = gen_pro_epilogue_adjust_stack_si_add (dest, src, offset);
9145 else if (x86_64_immediate_operand (offset, DImode))
9146 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, offset);
9150 /* r11 is used by the indirect sibcall return as well; it is set before
9151 the epilogue and used after the epilogue. */
9153 tmp = gen_rtx_REG (DImode, R11_REG);
9156 gcc_assert (src != hard_frame_pointer_rtx
9157 && dest != hard_frame_pointer_rtx);
9158 tmp = hard_frame_pointer_rtx;
9160 insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
9162 add_frame_related_expr = true;
9164 insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, tmp);
9167 insn = emit_insn (insn);
9169 ix86_add_queued_cfa_restore_notes (insn);
9175 gcc_assert (m->fs.cfa_reg == src);
9176 m->fs.cfa_offset += INTVAL (offset);
9177 m->fs.cfa_reg = dest;
9179 r = gen_rtx_PLUS (Pmode, src, offset);
9180 r = gen_rtx_SET (VOIDmode, dest, r);
9181 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9182 RTX_FRAME_RELATED_P (insn) = 1;
9186 RTX_FRAME_RELATED_P (insn) = 1;
9187 if (add_frame_related_expr)
9189 rtx r = gen_rtx_PLUS (Pmode, src, offset);
9190 r = gen_rtx_SET (VOIDmode, dest, r);
9191 add_reg_note (insn, REG_FRAME_RELATED_EXPR, r);
9195 if (dest == stack_pointer_rtx)
9197 HOST_WIDE_INT ooffset = m->fs.sp_offset;
9198 bool valid = m->fs.sp_valid;
9200 if (src == hard_frame_pointer_rtx)
9202 valid = m->fs.fp_valid;
9203 ooffset = m->fs.fp_offset;
9205 else if (src == crtl->drap_reg)
9207 valid = m->fs.drap_valid;
9212 /* Else there are two possibilities: SP itself, which we set
9213 up as the default above. Or EH_RETURN_STACKADJ_RTX, which we
9214 have taken care of by hand along the eh_return path. */
9215 gcc_checking_assert (src == stack_pointer_rtx
9216 || offset == const0_rtx);
9219 m->fs.sp_offset = ooffset - INTVAL (offset);
9220 m->fs.sp_valid = valid;
9224 /* Find an available register to be used as the dynamic realign
9225 argument pointer register. Such a register will be written in the
9226 prologue and used at the beginning of the body, so it must not be
9227 a parameter-passing register.
9229 We reuse the static-chain register if it is available. Otherwise, we
9230 use DI for i386 and R13 for x86-64. We chose R13 since it has a shorter encoding.
9233 Return: the regno of the chosen register. */
9236 find_drap_reg (void)
9238 tree decl = cfun->decl;
9242 /* Use R13 for a nested function or a function that needs a static
9243 chain. Since a function with a tail call may use any caller-saved
9244 register in the epilogue, DRAP must not use a caller-saved
9245 register in that case. */
9246 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9253 /* Use DI for a nested function or a function that needs a static
9254 chain. Since a function with a tail call may use any caller-saved
9255 register in the epilogue, DRAP must not use a caller-saved
9256 register in that case. */
9257 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
9260 /* Reuse the static chain register if it isn't used for parameter passing. */
9262 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2)
9264 unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (decl));
9265 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) == 0)
9272 /* Return minimum incoming stack alignment. */
9275 ix86_minimum_incoming_stack_boundary (bool sibcall)
9277 unsigned int incoming_stack_boundary;
9279 /* Prefer the one specified at command line. */
9280 if (ix86_user_incoming_stack_boundary)
9281 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
9282 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
9283 if -mstackrealign is used, this isn't used for a sibcall check, and the
9284 estimated stack alignment is 128 bits. */
9287 && ix86_force_align_arg_pointer
9288 && crtl->stack_alignment_estimated == 128)
9289 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9291 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
9293 /* Incoming stack alignment can be changed on individual functions
9294 via force_align_arg_pointer attribute. We use the smallest
9295 incoming stack boundary. */
9296 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
9297 && lookup_attribute (ix86_force_align_arg_pointer_string,
9298 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
9299 incoming_stack_boundary = MIN_STACK_BOUNDARY;
9301 /* The incoming stack frame has to be aligned at least at
9302 parm_stack_boundary. */
9303 if (incoming_stack_boundary < crtl->parm_stack_boundary)
9304 incoming_stack_boundary = crtl->parm_stack_boundary;
9306 /* The stack at the entrance of main is aligned by the runtime. We
9307 use the smallest incoming stack boundary. */
9308 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
9309 && DECL_NAME (current_function_decl)
9310 && MAIN_NAME_P (DECL_NAME (current_function_decl))
9311 && DECL_FILE_SCOPE_P (current_function_decl))
9312 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
9314 return incoming_stack_boundary;
9317 /* Update incoming stack boundary and estimated stack alignment. */
9320 ix86_update_stack_boundary (void)
9322 ix86_incoming_stack_boundary
9323 = ix86_minimum_incoming_stack_boundary (false);
9325 /* An x86_64 vararg function needs 16-byte stack alignment for the register save area. */
9329 && crtl->stack_alignment_estimated < 128)
9330 crtl->stack_alignment_estimated = 128;
9333 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
9334 needed or an rtx for DRAP otherwise. */
9337 ix86_get_drap_rtx (void)
9339 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
9340 crtl->need_drap = true;
9342 if (stack_realign_drap)
9344 /* Assign DRAP to vDRAP and return vDRAP. */
9345 unsigned int regno = find_drap_reg ();
9350 arg_ptr = gen_rtx_REG (Pmode, regno);
9351 crtl->drap_reg = arg_ptr;
9354 drap_vreg = copy_to_reg (arg_ptr);
9358 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
9361 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
9362 RTX_FRAME_RELATED_P (insn) = 1;
9370 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
9373 ix86_internal_arg_pointer (void)
9375 return virtual_incoming_args_rtx;
9378 struct scratch_reg {
9383 /* Return a short-lived scratch register for use on function entry.
9384 In 32-bit mode, it is valid only after the registers are saved
9385 in the prologue. This register must be released by means of
9386 release_scratch_register_on_entry once it is dead. */
9389 get_scratch_register_on_entry (struct scratch_reg *sr)
9397 /* We always use R11 in 64-bit mode. */
9402 tree decl = current_function_decl, fntype = TREE_TYPE (decl);
9404 = lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
9405 bool static_chain_p = DECL_STATIC_CHAIN (decl);
9406 int regparm = ix86_function_regparm (fntype, decl);
9408 = crtl->drap_reg ? REGNO (crtl->drap_reg) : INVALID_REGNUM;
9410 /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
9411 for the static chain register. */
9412 if ((regparm < 1 || (fastcall_p && !static_chain_p))
9413 && drap_regno != AX_REG)
9415 else if (regparm < 2 && drap_regno != DX_REG)
9417 /* ecx is the static chain register. */
9418 else if (regparm < 3 && !fastcall_p && !static_chain_p
9419 && drap_regno != CX_REG)
9421 else if (ix86_save_reg (BX_REG, true))
9423 /* esi is the static chain register. */
9424 else if (!(regparm == 3 && static_chain_p)
9425 && ix86_save_reg (SI_REG, true))
9427 else if (ix86_save_reg (DI_REG, true))
9431 regno = (drap_regno == AX_REG ? DX_REG : AX_REG);
9436 sr->reg = gen_rtx_REG (Pmode, regno);
9439 rtx insn = emit_insn (gen_push (sr->reg));
9440 RTX_FRAME_RELATED_P (insn) = 1;
9444 /* Release a scratch register obtained from the preceding function. */
9447 release_scratch_register_on_entry (struct scratch_reg *sr)
9451 rtx x, insn = emit_insn (gen_pop (sr->reg));
9453 /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop. */
9454 RTX_FRAME_RELATED_P (insn) = 1;
9455 x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
9456 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
9457 add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
9461 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
9463 /* Emit code to adjust the stack pointer by SIZE bytes while probing it. */
9466 ix86_adjust_stack_and_probe (const HOST_WIDE_INT size)
9468 /* We skip the probe for the first interval + a small dope of 4 words and
9469 probe that many bytes past the specified size to maintain a protection
9470 area at the bottom of the stack. */
9471 const int dope = 4 * UNITS_PER_WORD;
9472 rtx size_rtx = GEN_INT (size), last;
9474 /* See if we have a constant small number of probes to generate. If so,
9475 that's the easy case. The run-time loop is made up of 11 insns in the
9476 generic case while the compile-time loop is made up of 3+2*(n-1) insns
9477 for n # of intervals. */
9478 if (size <= 5 * PROBE_INTERVAL)
9480 HOST_WIDE_INT i, adjust;
9481 bool first_probe = true;
9483 /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
9484 values of N from 1 until it exceeds SIZE. If only one probe is
9485 needed, this will not generate any code. Then adjust and probe
9486 to PROBE_INTERVAL + SIZE. */
9487 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
9491 adjust = 2 * PROBE_INTERVAL + dope;
9492 first_probe = false;
9495 adjust = PROBE_INTERVAL;
9497 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9498 plus_constant (stack_pointer_rtx, -adjust)));
9499 emit_stack_probe (stack_pointer_rtx);
9503 adjust = size + PROBE_INTERVAL + dope;
9505 adjust = size + PROBE_INTERVAL - i;
9507 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9508 plus_constant (stack_pointer_rtx, -adjust)));
9509 emit_stack_probe (stack_pointer_rtx);
9511 /* Adjust back to account for the additional first interval. */
9512 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9513 plus_constant (stack_pointer_rtx,
9514 PROBE_INTERVAL + dope)));
9517 /* Otherwise, do the same as above, but in a loop. Note that we must be
9518 extra careful with variables wrapping around because we might be at
9519 the very top (or the very bottom) of the address space and we have
9520 to be able to handle this case properly; in particular, we use an
9521 equality test for the loop condition. */
9524 HOST_WIDE_INT rounded_size;
9525 struct scratch_reg sr;
9527 get_scratch_register_on_entry (&sr);
9530 /* Step 1: round SIZE to the previous multiple of the interval. */
9532 rounded_size = size & -PROBE_INTERVAL;
9535 /* Step 2: compute initial and final value of the loop counter. */
9537 /* SP = SP_0 + PROBE_INTERVAL. */
9538 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9539 plus_constant (stack_pointer_rtx,
9540 - (PROBE_INTERVAL + dope))));
9542 /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
9543 emit_move_insn (sr.reg, GEN_INT (-rounded_size));
9544 emit_insn (gen_rtx_SET (VOIDmode, sr.reg,
9545 gen_rtx_PLUS (Pmode, sr.reg,
9546 stack_pointer_rtx)));
9551 while (SP != LAST_ADDR)
9553 SP = SP + PROBE_INTERVAL
9557 adjusts SP and probes to PROBE_INTERVAL + N * PROBE_INTERVAL for
9558 values of N from 1 until it is equal to ROUNDED_SIZE. */
9560 emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg, size_rtx));
9563 /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
9564 assert at compile-time that SIZE is equal to ROUNDED_SIZE. */
9566 if (size != rounded_size)
9568 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9569 plus_constant (stack_pointer_rtx,
9570 rounded_size - size)));
9571 emit_stack_probe (stack_pointer_rtx);
9574 /* Adjust back to account for the additional first interval. */
9575 last = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9576 plus_constant (stack_pointer_rtx,
9577 PROBE_INTERVAL + dope)));
9579 release_scratch_register_on_entry (&sr);
9582 gcc_assert (cfun->machine->fs.cfa_reg != stack_pointer_rtx);
9584 /* Even if the stack pointer isn't the CFA register, we need to correctly
9585 describe the adjustments made to it, in particular differentiate the
9586 frame-related ones from the frame-unrelated ones. */
9589 rtx expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
9590 XVECEXP (expr, 0, 0)
9591 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9592 plus_constant (stack_pointer_rtx, -size));
9593 XVECEXP (expr, 0, 1)
9594 = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9595 plus_constant (stack_pointer_rtx,
9596 PROBE_INTERVAL + dope + size));
9597 add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
9598 RTX_FRAME_RELATED_P (last) = 1;
9600 cfun->machine->fs.sp_offset += size;
9603 /* Make sure nothing is scheduled before we are done. */
9604 emit_insn (gen_blockage ());
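/* Illustrative sketch, not part of GCC: the run-time behaviour of the
   moving-SP expansion above, ignoring the first-interval skip and the
   4-word dope (names made up for exposition):  */
#if 0
static void
moving_sp_probe_model (volatile char *sp, long size, long interval)
{
  long rounded = size & -interval;
  volatile char *last = sp - rounded;

  /* Loop: lower SP one interval at a time, probing each new bottom.
     Note the equality test, matching the wraparound caveat above.  */
  while (sp != last)
    {
      sp -= interval;
      *sp = 0;
    }
  /* Residual adjustment and probe if SIZE wasn't a multiple.  */
  if (size != rounded)
    {
      sp -= size - rounded;
      *sp = 0;
    }
}
#endif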
9607 /* Adjust the stack pointer up to REG while probing it. */
9610 output_adjust_stack_and_probe (rtx reg)
9612 static int labelno = 0;
9613 char loop_lab[32], end_lab[32];
9616 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
9617 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
9619 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
9621 /* Jump to END_LAB if SP == LAST_ADDR. */
9622 xops[0] = stack_pointer_rtx;
9624 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
9625 fputs ("\tje\t", asm_out_file);
9626 assemble_name_raw (asm_out_file, end_lab);
9627 fputc ('\n', asm_out_file);
9629 /* SP = SP + PROBE_INTERVAL. */
9630 xops[1] = GEN_INT (PROBE_INTERVAL);
9631 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
9634 xops[1] = const0_rtx;
9635 output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);
9637 fprintf (asm_out_file, "\tjmp\t");
9638 assemble_name_raw (asm_out_file, loop_lab);
9639 fputc ('\n', asm_out_file);
9641 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
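/* For reference, the loop emitted above looks roughly like this in AT&T
   syntax (labels are assembler-local; a 4K PROBE_INTERVAL and %eax as
   the LAST_ADDR register are assumed for illustration):

   .LPSRL0:
	cmpl	%eax, %esp	# stop once SP reaches LAST_ADDR
	je	.LPSRE0
	subl	$4096, %esp	# SP -= PROBE_INTERVAL
	orl	$0, (%esp)	# probe the newly exposed word
	jmp	.LPSRL0
   .LPSRE0:
*/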
9646 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
9647 inclusive. These are offsets from the current stack pointer. */
9650 ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
9652 /* See if we have a constant small number of probes to generate. If so,
9653 that's the easy case. The run-time loop is made up of 7 insns in the
9654 generic case while the compile-time loop is made up of n insns for n # of intervals. */
9656 if (size <= 7 * PROBE_INTERVAL)
9660 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
9661 it exceeds SIZE. If only one probe is needed, this will not
9662 generate any code. Then probe at FIRST + SIZE. */
9663 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
9664 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + i)));
9666 emit_stack_probe (plus_constant (stack_pointer_rtx, -(first + size)));
9669 /* Otherwise, do the same as above, but in a loop. Note that we must be
9670 extra careful with variables wrapping around because we might be at
9671 the very top (or the very bottom) of the address space and we have
9672 to be able to handle this case properly; in particular, we use an
9673 equality test for the loop condition. */
9676 HOST_WIDE_INT rounded_size, last;
9677 struct scratch_reg sr;
9679 get_scratch_register_on_entry (&sr);
9682 /* Step 1: round SIZE to the previous multiple of the interval. */
9684 rounded_size = size & -PROBE_INTERVAL;
9687 /* Step 2: compute initial and final value of the loop counter. */
9689 /* TEST_OFFSET = FIRST. */
9690 emit_move_insn (sr.reg, GEN_INT (-first));
9692 /* LAST_OFFSET = FIRST + ROUNDED_SIZE. */
9693 last = first + rounded_size;
9698 while (TEST_ADDR != LAST_ADDR)
9700 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
9704 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
9705 until it is equal to ROUNDED_SIZE. */
9707 emit_insn (ix86_gen_probe_stack_range (sr.reg, sr.reg, GEN_INT (-last)));
9710 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
9711 that SIZE is equal to ROUNDED_SIZE. */
9713 if (size != rounded_size)
9714 emit_stack_probe (plus_constant (gen_rtx_PLUS (Pmode,
9717 rounded_size - size));
9719 release_scratch_register_on_entry (&sr);
9722 /* Make sure nothing is scheduled before we are done. */
9723 emit_insn (gen_blockage ());
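/* Illustrative sketch, not part of GCC: in the small constant case the
   expansion above is straight-line.  Assuming a 4K PROBE_INTERVAL,
   FIRST = 0x1000 and SIZE = 3 * PROBE_INTERVAL would behave like

	orl	$0, -0x2000(%esp)	# FIRST + 1 * PROBE_INTERVAL
	orl	$0, -0x3000(%esp)	# FIRST + 2 * PROBE_INTERVAL
	orl	$0, -0x4000(%esp)	# FIRST + SIZE

   (the exact probe instruction is whatever emit_stack_probe expands to).  */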
9726 /* Probe a range of stack addresses from REG to END, inclusive. These are
9727 offsets from the current stack pointer. */
9730 output_probe_stack_range (rtx reg, rtx end)
9732 static int labelno = 0;
9733 char loop_lab[32], end_lab[32];
9736 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
9737 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
9739 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
9741 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
9744 output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);
9745 fputs ("\tje\t", asm_out_file);
9746 assemble_name_raw (asm_out_file, end_lab);
9747 fputc ('\n', asm_out_file);
9749 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
9750 xops[1] = GEN_INT (PROBE_INTERVAL);
9751 output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);
9753 /* Probe at TEST_ADDR. */
9754 xops[0] = stack_pointer_rtx;
9756 xops[2] = const0_rtx;
9757 output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops);
9759 fprintf (asm_out_file, "\tjmp\t");
9760 assemble_name_raw (asm_out_file, loop_lab);
9761 fputc ('\n', asm_out_file);
9763 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
9768 /* Finalize the stack_realign_needed flag, which guides the prologue and
9769 epilogue so they are generated in the correct form. */
9771 ix86_finalize_stack_realign_flags (void)
9773 /* Check whether stack realignment is really needed after reload, and
9774 store the result in cfun. */
9775 unsigned int incoming_stack_boundary
9776 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
9777 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
9778 unsigned int stack_realign = (incoming_stack_boundary
9779 < (current_function_is_leaf
9780 ? crtl->max_used_stack_slot_alignment
9781 : crtl->stack_alignment_needed));
9783 if (crtl->stack_realign_finalized)
9785 /* After stack_realign_needed is finalized, we can no longer update it. */
9787 gcc_assert (crtl->stack_realign_needed == stack_realign);
9791 crtl->stack_realign_needed = stack_realign;
9792 crtl->stack_realign_finalized = true;
9796 /* Expand the prologue into a bunch of separate insns. */
9799 ix86_expand_prologue (void)
9801 struct machine_function *m = cfun->machine;
9804 struct ix86_frame frame;
9805 HOST_WIDE_INT allocate;
9806 bool int_registers_saved;
9808 ix86_finalize_stack_realign_flags ();
9810 /* DRAP should not coexist with stack_realign_fp. */
9811 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
9813 memset (&m->fs, 0, sizeof (m->fs));
9815 /* Initialize CFA state for before the prologue. */
9816 m->fs.cfa_reg = stack_pointer_rtx;
9817 m->fs.cfa_offset = INCOMING_FRAME_SP_OFFSET;
9819 /* Track SP offset to the CFA. We continue tracking this after we've
9820 swapped the CFA register away from SP. In the case of re-alignment
9821 this is fudged; we're interested in offsets within the local frame. */
9822 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
9823 m->fs.sp_valid = true;
9825 ix86_compute_frame_layout (&frame);
9827 if (!TARGET_64BIT && ix86_function_ms_hook_prologue (current_function_decl))
9829 /* We should have already generated an error for any use of
9830 ms_hook on a nested function. */
9831 gcc_checking_assert (!ix86_static_chain_on_stack);
9833 /* Check whether profiling is active and we shall use the
9834 profiling-before-prologue variant. If so, issue a sorry. */
9835 if (crtl->profile && flag_fentry != 0)
9836 sorry ("ms_hook_prologue attribute isn%'t compatible "
9837 "with -mfentry for 32-bit");
9839 /* In ix86_asm_output_function_label we emitted:
9840 8b ff movl.s %edi,%edi
9842 8b ec movl.s %esp,%ebp
9844 This matches the hookable function prologue in Win32 API
9845 functions in Microsoft Windows XP Service Pack 2 and newer.
9846 Wine uses this to enable Windows apps to hook the Win32 API
9847 functions provided by Wine.
9849 What that means is that we've already set up the frame pointer. */
9851 if (frame_pointer_needed
9852 && !(crtl->drap_reg && crtl->stack_realign_needed))
9856 /* We've decided to use the frame pointer already set up.
9857 Describe this to the unwinder by pretending that both
9858 push and mov insns happen right here.
9860 Putting the unwind info here at the end of the ms_hook
9861 is done so that we can make absolutely certain we get
9862 the required byte sequence at the start of the function,
9863 rather than relying on an assembler that can produce
9864 the exact encoding required.
9866 However it does mean (in the unpatched case) that we have
9867 a 1 insn window where the asynchronous unwind info is
9868 incorrect. However, if we placed the unwind info at
9869 its correct location we would have incorrect unwind info
9870 in the patched case. Which is probably all moot since
9871 I don't expect Wine generates dwarf2 unwind info for the
9872 system libraries that use this feature. */
9874 insn = emit_insn (gen_blockage ());
9876 push = gen_push (hard_frame_pointer_rtx);
9877 mov = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
9879 RTX_FRAME_RELATED_P (push) = 1;
9880 RTX_FRAME_RELATED_P (mov) = 1;
9882 RTX_FRAME_RELATED_P (insn) = 1;
9883 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
9884 gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, push, mov)));
9886 /* Note that gen_push incremented m->fs.cfa_offset, even
9887 though we didn't emit the push insn here. */
9888 m->fs.cfa_reg = hard_frame_pointer_rtx;
9889 m->fs.fp_offset = m->fs.cfa_offset;
9890 m->fs.fp_valid = true;
9894 /* The frame pointer is not needed so pop %ebp again.
9895 This leaves us with a pristine state. */
9896 emit_insn (gen_pop (hard_frame_pointer_rtx));
9900 /* The first insn of a function that accepts its static chain on the
9901 stack is to push the register that would be filled in by a direct
9902 call. This insn will be skipped by the trampoline. */
9903 else if (ix86_static_chain_on_stack)
9905 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
9906 emit_insn (gen_blockage ());
9908 /* We don't want to interpret this push insn as a register save,
9909 only as a stack adjustment. The real copy of the register as
9910 a save will be done later, if needed. */
9911 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
9912 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
9913 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
9914 RTX_FRAME_RELATED_P (insn) = 1;
9917 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
9918 DRAP is needed and stack realignment is really needed after reload. */
9919 if (stack_realign_drap)
9921 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
9923 /* Only need to push parameter pointer reg if it is caller saved. */
9924 if (!call_used_regs[REGNO (crtl->drap_reg)])
9926 /* Push arg pointer reg */
9927 insn = emit_insn (gen_push (crtl->drap_reg));
9928 RTX_FRAME_RELATED_P (insn) = 1;
9931 /* Grab the argument pointer. */
9932 t = plus_constant (stack_pointer_rtx, m->fs.sp_offset);
9933 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
9934 RTX_FRAME_RELATED_P (insn) = 1;
9935 m->fs.cfa_reg = crtl->drap_reg;
9936 m->fs.cfa_offset = 0;
9938 /* Align the stack. */
9939 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
9941 GEN_INT (-align_bytes)));
9942 RTX_FRAME_RELATED_P (insn) = 1;
9944 /* Replicate the return address on the stack so that the return
9945 address can be reached via the (argp - 1) slot. This is needed
9946 to implement the macro RETURN_ADDR_RTX and the intrinsic function
9947 expand_builtin_return_addr, etc. */
9948 t = plus_constant (crtl->drap_reg, -UNITS_PER_WORD);
9949 t = gen_frame_mem (Pmode, t);
9950 insn = emit_insn (gen_push (t));
9951 RTX_FRAME_RELATED_P (insn) = 1;
9953 /* For the purposes of frame and register save area addressing,
9954 we've started over with a new frame. */
9955 m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
9956 m->fs.realigned = true;
9959 if (frame_pointer_needed && !m->fs.fp_valid)
9961 /* Note: AT&T enter does NOT have reversed args. Enter is probably
9962 slower on all targets. Also sdb doesn't like it. */
9963 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
9964 RTX_FRAME_RELATED_P (insn) = 1;
9966 if (m->fs.sp_offset == frame.hard_frame_pointer_offset)
9968 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
9969 RTX_FRAME_RELATED_P (insn) = 1;
9971 if (m->fs.cfa_reg == stack_pointer_rtx)
9972 m->fs.cfa_reg = hard_frame_pointer_rtx;
9973 m->fs.fp_offset = m->fs.sp_offset;
9974 m->fs.fp_valid = true;
9978 int_registers_saved = (frame.nregs == 0);
9980 if (!int_registers_saved)
9982 /* If saving registers via PUSH, do so now. */
9983 if (!frame.save_regs_using_mov)
9985 ix86_emit_save_regs ();
9986 int_registers_saved = true;
9987 gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
9990 /* When using the red zone we may start register saving before allocating
9991 the stack frame, saving one cycle of the prologue. However, avoid
9992 doing this if we have to probe the stack; at least on x86_64 the
9993 stack probe can turn into a call that clobbers a red zone location. */
9994 else if (ix86_using_red_zone ()
9995 && (! TARGET_STACK_PROBE
9996 || frame.stack_pointer_offset < CHECK_STACK_LIMIT))
9998 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
9999 int_registers_saved = true;
10003 if (stack_realign_fp)
10005 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
10006 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
10008 /* The computation of the size of the re-aligned stack frame means
10009 that we must allocate the size of the register save area before
10010 performing the actual alignment. Otherwise we cannot guarantee
10011 that there's enough storage above the realignment point. */
10012 if (m->fs.sp_offset != frame.sse_reg_save_offset)
10013 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10014 GEN_INT (m->fs.sp_offset
10015 - frame.sse_reg_save_offset),
10018 /* Align the stack. */
10019 insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
10021 GEN_INT (-align_bytes)));
10023 /* For the purposes of register save area addressing, the stack
10024 pointer is no longer valid. As for the value of sp_offset,
10025 see ix86_compute_frame_layout, which we need to match in order
10026 to pass verification of stack_pointer_offset at the end. */
10027 m->fs.sp_offset = (m->fs.sp_offset + align_bytes) & -align_bytes;
10028 m->fs.sp_valid = false;
10031 allocate = frame.stack_pointer_offset - m->fs.sp_offset;
10033 if (flag_stack_usage_info)
10035 /* We start to count from ARG_POINTER. */
10036 HOST_WIDE_INT stack_size = frame.stack_pointer_offset;
10038 /* If it was realigned, take into account the fake frame. */
10039 if (stack_realign_drap)
10041 if (ix86_static_chain_on_stack)
10042 stack_size += UNITS_PER_WORD;
10044 if (!call_used_regs[REGNO (crtl->drap_reg)])
10045 stack_size += UNITS_PER_WORD;
10047 /* This over-estimates by 1 minimal-stack-alignment-unit but
10048 mitigates that by counting in the new return address slot. */
10049 current_function_dynamic_stack_size
10050 += crtl->stack_alignment_needed / BITS_PER_UNIT;
10053 current_function_static_stack_size = stack_size;
10056 /* The stack has already been decremented by the instruction calling us
10057 so probe if the size is non-negative to preserve the protection area. */
10058 if (allocate >= 0 && flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
10060 /* We expect the registers to be saved when probes are used. */
10061 gcc_assert (int_registers_saved);
10063 if (STACK_CHECK_MOVING_SP)
10065 ix86_adjust_stack_and_probe (allocate);
10070 HOST_WIDE_INT size = allocate;
10072 if (TARGET_64BIT && size >= (HOST_WIDE_INT) 0x80000000)
10073 size = 0x80000000 - STACK_CHECK_PROTECT - 1;
10075 if (TARGET_STACK_PROBE)
10076 ix86_emit_probe_stack_range (0, size + STACK_CHECK_PROTECT);
10078 ix86_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
10084 else if (!ix86_target_stack_probe ()
10085 || frame.stack_pointer_offset < CHECK_STACK_LIMIT)
10087 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10088 GEN_INT (-allocate), -1,
10089 m->fs.cfa_reg == stack_pointer_rtx);
10093 rtx eax = gen_rtx_REG (Pmode, AX_REG);
10095 rtx (*adjust_stack_insn)(rtx, rtx, rtx);
10097 bool eax_live = false;
10098 bool r10_live = false;
10101 r10_live = (DECL_STATIC_CHAIN (current_function_decl) != 0);
10102 if (!TARGET_64BIT_MS_ABI)
10103 eax_live = ix86_eax_live_at_start_p ();
10107 emit_insn (gen_push (eax));
10108 allocate -= UNITS_PER_WORD;
10112 r10 = gen_rtx_REG (Pmode, R10_REG);
10113 emit_insn (gen_push (r10));
10114 allocate -= UNITS_PER_WORD;
10117 emit_move_insn (eax, GEN_INT (allocate));
10118 emit_insn (ix86_gen_allocate_stack_worker (eax, eax));
10120 /* Use the fact that AX still contains ALLOCATE. */
10121 adjust_stack_insn = (TARGET_64BIT
10122 ? gen_pro_epilogue_adjust_stack_di_sub
10123 : gen_pro_epilogue_adjust_stack_si_sub);
10125 insn = emit_insn (adjust_stack_insn (stack_pointer_rtx,
10126 stack_pointer_rtx, eax));
10128 /* Note that SEH directives need to continue tracking the stack
10129 pointer even after the frame pointer has been set up. */
10130 if (m->fs.cfa_reg == stack_pointer_rtx || TARGET_SEH)
10132 if (m->fs.cfa_reg == stack_pointer_rtx)
10133 m->fs.cfa_offset += allocate;
10135 RTX_FRAME_RELATED_P (insn) = 1;
10136 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10137 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10138 plus_constant (stack_pointer_rtx,
10141 m->fs.sp_offset += allocate;
10143 if (r10_live && eax_live)
10145 t = choose_baseaddr (m->fs.sp_offset - allocate);
10146 emit_move_insn (r10, gen_frame_mem (Pmode, t));
10147 t = choose_baseaddr (m->fs.sp_offset - allocate - UNITS_PER_WORD);
10148 emit_move_insn (eax, gen_frame_mem (Pmode, t));
10150 else if (eax_live || r10_live)
10152 t = choose_baseaddr (m->fs.sp_offset - allocate);
10153 emit_move_insn ((eax_live ? eax : r10), gen_frame_mem (Pmode, t));
10156 gcc_assert (m->fs.sp_offset == frame.stack_pointer_offset);
10158 /* If we haven't already set up the frame pointer, do so now. */
10159 if (frame_pointer_needed && !m->fs.fp_valid)
10161 insn = ix86_gen_add3 (hard_frame_pointer_rtx, stack_pointer_rtx,
10162 GEN_INT (frame.stack_pointer_offset
10163 - frame.hard_frame_pointer_offset));
10164 insn = emit_insn (insn);
10165 RTX_FRAME_RELATED_P (insn) = 1;
10166 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
10168 if (m->fs.cfa_reg == stack_pointer_rtx)
10169 m->fs.cfa_reg = hard_frame_pointer_rtx;
10170 m->fs.fp_offset = frame.hard_frame_pointer_offset;
10171 m->fs.fp_valid = true;
10174 if (!int_registers_saved)
10175 ix86_emit_save_regs_using_mov (frame.reg_save_offset);
10176 if (frame.nsseregs)
10177 ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
10179 pic_reg_used = false;
10180 if (pic_offset_table_rtx
10181 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
10184 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
10186 if (alt_pic_reg_used != INVALID_REGNUM)
10187 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
10189 pic_reg_used = true;
10196 if (ix86_cmodel == CM_LARGE_PIC)
10198 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
10199 rtx label = gen_label_rtx ();
10200 emit_label (label);
10201 LABEL_PRESERVE_P (label) = 1;
10202 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
10203 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
10204 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
10205 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
10206 pic_offset_table_rtx, tmp_reg));
10209 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
10213 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
10214 RTX_FRAME_RELATED_P (insn) = 1;
10215 add_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL_RTX);
10219 /* In the pic_reg_used case, make sure that the got load isn't deleted
10220 when mcount needs it. Blockage to avoid call movement across mcount
     call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
     note.  */
10223 if (crtl->profile && !flag_fentry && pic_reg_used)
10224 emit_insn (gen_prologue_use (pic_offset_table_rtx));
10226 if (crtl->drap_reg && !crtl->stack_realign_needed)
      /* The vDRAP is set up, but after reload it turns out stack
	 realignment isn't necessary; here we emit the prologue to set up
	 the DRAP without the stack-realignment adjustment.  */
10231 t = choose_baseaddr (0);
10232 emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, t));
10235 /* Prevent instructions from being scheduled into register save push
10236 sequence when access to the redzone area is done through frame pointer.
10237 The offset between the frame pointer and the stack pointer is calculated
10238 relative to the value of the stack pointer at the end of the function
10239 prologue, and moving instructions that access redzone area via frame
10240 pointer inside push sequence violates this assumption. */
10241 if (frame_pointer_needed && frame.red_zone_size)
10242 emit_insn (gen_memory_blockage ());
10244 /* Emit cld instruction if stringops are used in the function. */
10245 if (TARGET_CLD && ix86_current_function_needs_cld)
10246 emit_insn (gen_cld ());
10248 /* SEH requires that the prologue end within 256 bytes of the start of
10249 the function. Prevent instruction schedules that would extend that. */
10251 emit_insn (gen_blockage ());
10254 /* Emit code to restore REG using a POP insn. */
10257 ix86_emit_restore_reg_using_pop (rtx reg)
10259 struct machine_function *m = cfun->machine;
10260 rtx insn = emit_insn (gen_pop (reg));
10262 ix86_add_cfa_restore_note (insn, reg, m->fs.sp_offset);
10263 m->fs.sp_offset -= UNITS_PER_WORD;
10265 if (m->fs.cfa_reg == crtl->drap_reg
10266 && REGNO (reg) == REGNO (crtl->drap_reg))
10268 /* Previously we'd represented the CFA as an expression
10269 like *(%ebp - 8). We've just popped that value from
10270 the stack, which means we need to reset the CFA to
10271 the drap register. This will remain until we restore
10272 the stack pointer. */
10273 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
10274 RTX_FRAME_RELATED_P (insn) = 1;
10276 /* This means that the DRAP register is valid for addressing too. */
10277 m->fs.drap_valid = true;
10281 if (m->fs.cfa_reg == stack_pointer_rtx)
10283 rtx x = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
10284 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
10285 add_reg_note (insn, REG_CFA_ADJUST_CFA, x);
10286 RTX_FRAME_RELATED_P (insn) = 1;
10288 m->fs.cfa_offset -= UNITS_PER_WORD;
10291 /* When the frame pointer is the CFA, and we pop it, we are
10292 swapping back to the stack pointer as the CFA. This happens
10293 for stack frames that don't allocate other data, so we assume
10294 the stack pointer is now pointing at the return address, i.e.
10295 the function entry state, which makes the offset be 1 word. */
10296 if (reg == hard_frame_pointer_rtx)
10298 m->fs.fp_valid = false;
10299 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
10301 m->fs.cfa_reg = stack_pointer_rtx;
10302 m->fs.cfa_offset -= UNITS_PER_WORD;
10304 add_reg_note (insn, REG_CFA_DEF_CFA,
10305 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10306 GEN_INT (m->fs.cfa_offset)));
10307 RTX_FRAME_RELATED_P (insn) = 1;
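      /* A concrete 32-bit illustration (assuming UNITS_PER_WORD == 4):
	 if the CFA was %ebp+8 and we pop %ebp, the stack pointer now
	 points at the return address, so the note emitted above
	 re-expresses the CFA as %esp+4, the function-entry state
	 described in the comment.  */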
10312 /* Emit code to restore saved registers using POP insns. */
10315 ix86_emit_restore_regs_using_pop (void)
10317 unsigned int regno;
10319 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10320 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
10321 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno));
10324 /* Emit code and notes for the LEAVE instruction. */
10327 ix86_emit_leave (void)
10329 struct machine_function *m = cfun->machine;
10330 rtx insn = emit_insn (ix86_gen_leave ());
10332 ix86_add_queued_cfa_restore_notes (insn);
10334 gcc_assert (m->fs.fp_valid);
10335 m->fs.sp_valid = true;
10336 m->fs.sp_offset = m->fs.fp_offset - UNITS_PER_WORD;
10337 m->fs.fp_valid = false;
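  /* "leave" behaves like "mov %ebp, %esp; pop %ebp" (32-bit names used
     for illustration), so the stack pointer becomes valid again one word
     above where the frame pointer pointed, while the frame pointer
     itself ceases to be usable for addressing; the updates above record
     exactly that.  */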
10339 if (m->fs.cfa_reg == hard_frame_pointer_rtx)
10341 m->fs.cfa_reg = stack_pointer_rtx;
10342 m->fs.cfa_offset = m->fs.sp_offset;
10344 add_reg_note (insn, REG_CFA_DEF_CFA,
10345 plus_constant (stack_pointer_rtx, m->fs.sp_offset));
10346 RTX_FRAME_RELATED_P (insn) = 1;
10347 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx,
10352 /* Emit code to restore saved registers using MOV insns.
10353 First register is restored from CFA - CFA_OFFSET. */
10355 ix86_emit_restore_regs_using_mov (HOST_WIDE_INT cfa_offset,
10356 bool maybe_eh_return)
10358 struct machine_function *m = cfun->machine;
10359 unsigned int regno;
10361 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10362 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
10364 rtx reg = gen_rtx_REG (Pmode, regno);
10367 mem = choose_baseaddr (cfa_offset);
10368 mem = gen_frame_mem (Pmode, mem);
10369 insn = emit_move_insn (reg, mem);
10371 if (m->fs.cfa_reg == crtl->drap_reg && regno == REGNO (crtl->drap_reg))
10373 /* Previously we'd represented the CFA as an expression
10374 like *(%ebp - 8). We've just popped that value from
10375 the stack, which means we need to reset the CFA to
10376 the drap register. This will remain until we restore
10377 the stack pointer. */
10378 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
10379 RTX_FRAME_RELATED_P (insn) = 1;
10381 /* This means that the DRAP register is valid for addressing. */
10382 m->fs.drap_valid = true;
10385 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10387 cfa_offset -= UNITS_PER_WORD;
10391 /* Emit code to restore saved registers using MOV insns.
10392 First register is restored from CFA - CFA_OFFSET. */
10394 ix86_emit_restore_sse_regs_using_mov (HOST_WIDE_INT cfa_offset,
10395 bool maybe_eh_return)
10397 unsigned int regno;
10399 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
10400 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
10402 rtx reg = gen_rtx_REG (V4SFmode, regno);
10405 mem = choose_baseaddr (cfa_offset);
10406 mem = gen_rtx_MEM (V4SFmode, mem);
10407 set_mem_align (mem, 128);
10408 emit_move_insn (reg, mem);
10410 ix86_add_cfa_restore_note (NULL_RTX, reg, cfa_offset);
10416 /* Restore function stack, frame, and registers. */
10419 ix86_expand_epilogue (int style)
10421 struct machine_function *m = cfun->machine;
10422 struct machine_frame_state frame_state_save = m->fs;
10423 struct ix86_frame frame;
10424 bool restore_regs_via_mov;
10427 ix86_finalize_stack_realign_flags ();
10428 ix86_compute_frame_layout (&frame);
10430 m->fs.sp_valid = (!frame_pointer_needed
10431 || (current_function_sp_is_unchanging
10432 && !stack_realign_fp));
10433 gcc_assert (!m->fs.sp_valid
10434 || m->fs.sp_offset == frame.stack_pointer_offset);
10436 /* The FP must be valid if the frame pointer is present. */
10437 gcc_assert (frame_pointer_needed == m->fs.fp_valid);
10438 gcc_assert (!m->fs.fp_valid
10439 || m->fs.fp_offset == frame.hard_frame_pointer_offset);
10441 /* We must have *some* valid pointer to the stack frame. */
10442 gcc_assert (m->fs.sp_valid || m->fs.fp_valid);
10444 /* The DRAP is never valid at this point. */
10445 gcc_assert (!m->fs.drap_valid);
10447 /* See the comment about red zone and frame
10448 pointer usage in ix86_expand_prologue. */
10449 if (frame_pointer_needed && frame.red_zone_size)
10450 emit_insn (gen_memory_blockage ());
10452 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
10453 gcc_assert (!using_drap || m->fs.cfa_reg == crtl->drap_reg);
10455 /* Determine the CFA offset of the end of the red-zone. */
10456 m->fs.red_zone_offset = 0;
10457 if (ix86_using_red_zone () && crtl->args.pops_args < 65536)
10459 /* The red-zone begins below the return address. */
10460 m->fs.red_zone_offset = RED_ZONE_SIZE + UNITS_PER_WORD;
10462 /* When the register save area is in the aligned portion of
10463 the stack, determine the maximum runtime displacement that
10464 matches up with the aligned frame. */
10465 if (stack_realign_drap)
10466 m->fs.red_zone_offset -= (crtl->stack_alignment_needed / BITS_PER_UNIT
10470 /* Special care must be taken for the normal return case of a function
10471 using eh_return: the eax and edx registers are marked as saved, but
10472 not restored along this path. Adjust the save location to match. */
10473 if (crtl->calls_eh_return && style != 2)
10474 frame.reg_save_offset -= 2 * UNITS_PER_WORD;
10476 /* EH_RETURN requires the use of moves to function properly. */
10477 if (crtl->calls_eh_return)
10478 restore_regs_via_mov = true;
10479 /* SEH requires the use of pops to identify the epilogue. */
10480 else if (TARGET_SEH)
10481 restore_regs_via_mov = false;
  /* If we're only restoring one register and sp is not valid then
     use a move instruction to restore the register, since it's
     less work than reloading sp and popping the register.  */
10485 else if (!m->fs.sp_valid && frame.nregs <= 1)
10486 restore_regs_via_mov = true;
10487 else if (TARGET_EPILOGUE_USING_MOVE
10488 && cfun->machine->use_fast_prologue_epilogue
10489 && (frame.nregs > 1
10490 || m->fs.sp_offset != frame.reg_save_offset))
10491 restore_regs_via_mov = true;
10492 else if (frame_pointer_needed
10494 && m->fs.sp_offset != frame.reg_save_offset)
10495 restore_regs_via_mov = true;
10496 else if (frame_pointer_needed
10497 && TARGET_USE_LEAVE
10498 && cfun->machine->use_fast_prologue_epilogue
10499 && frame.nregs == 1)
10500 restore_regs_via_mov = true;
10502 restore_regs_via_mov = false;
10504 if (restore_regs_via_mov || frame.nsseregs)
10506 /* Ensure that the entire register save area is addressable via
10507 the stack pointer, if we will restore via sp. */
10509 && m->fs.sp_offset > 0x7fffffff
10510 && !(m->fs.fp_valid || m->fs.drap_valid)
10511 && (frame.nsseregs + frame.nregs) != 0)
10513 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10514 GEN_INT (m->fs.sp_offset
10515 - frame.sse_reg_save_offset),
10517 m->fs.cfa_reg == stack_pointer_rtx);
10521 /* If there are any SSE registers to restore, then we have to do it
10522 via moves, since there's obviously no pop for SSE regs. */
10523 if (frame.nsseregs)
10524 ix86_emit_restore_sse_regs_using_mov (frame.sse_reg_save_offset,
10527 if (restore_regs_via_mov)
10532 ix86_emit_restore_regs_using_mov (frame.reg_save_offset, style == 2);
10534 /* eh_return epilogues need %ecx added to the stack pointer. */
10537 rtx insn, sa = EH_RETURN_STACKADJ_RTX;
10539 /* Stack align doesn't work with eh_return. */
10540 gcc_assert (!stack_realign_drap);
      /* Neither do regparm nested functions.  */
10542 gcc_assert (!ix86_static_chain_on_stack);
10544 if (frame_pointer_needed)
10546 t = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
10547 t = plus_constant (t, m->fs.fp_offset - UNITS_PER_WORD);
10548 emit_insn (gen_rtx_SET (VOIDmode, sa, t));
10550 t = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
10551 insn = emit_move_insn (hard_frame_pointer_rtx, t);
10553 /* Note that we use SA as a temporary CFA, as the return
10554 address is at the proper place relative to it. We
10555 pretend this happens at the FP restore insn because
10556 prior to this insn the FP would be stored at the wrong
10557 offset relative to SA, and after this insn we have no
10558 other reasonable register to use for the CFA. We don't
10559 bother resetting the CFA to the SP for the duration of
10560 the return insn. */
10561 add_reg_note (insn, REG_CFA_DEF_CFA,
10562 plus_constant (sa, UNITS_PER_WORD));
10563 ix86_add_queued_cfa_restore_notes (insn);
10564 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
10565 RTX_FRAME_RELATED_P (insn) = 1;
10567 m->fs.cfa_reg = sa;
10568 m->fs.cfa_offset = UNITS_PER_WORD;
10569 m->fs.fp_valid = false;
10571 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
10572 const0_rtx, style, false);
10576 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
10577 t = plus_constant (t, m->fs.sp_offset - UNITS_PER_WORD);
10578 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, t));
10579 ix86_add_queued_cfa_restore_notes (insn);
10581 gcc_assert (m->fs.cfa_reg == stack_pointer_rtx);
10582 if (m->fs.cfa_offset != UNITS_PER_WORD)
10584 m->fs.cfa_offset = UNITS_PER_WORD;
10585 add_reg_note (insn, REG_CFA_DEF_CFA,
10586 plus_constant (stack_pointer_rtx,
10588 RTX_FRAME_RELATED_P (insn) = 1;
10591 m->fs.sp_offset = UNITS_PER_WORD;
10592 m->fs.sp_valid = true;
10597 /* SEH requires that the function end with (1) a stack adjustment
10598 if necessary, (2) a sequence of pops, and (3) a return or
10599 jump instruction. Prevent insns from the function body from
10600 being scheduled into this sequence. */
10603 /* Prevent a catch region from being adjacent to the standard
	 epilogue sequence.  Unfortunately neither crtl->uses_eh_lsda nor
	 several other flags that would be interesting to test are
	 set up yet.  */
10607 if (flag_non_call_exceptions)
10608 emit_insn (gen_nops (const1_rtx));
10610 emit_insn (gen_blockage ());
10613 /* First step is to deallocate the stack frame so that we can
10614 pop the registers. */
10615 if (!m->fs.sp_valid)
10617 pro_epilogue_adjust_stack (stack_pointer_rtx, hard_frame_pointer_rtx,
10618 GEN_INT (m->fs.fp_offset
10619 - frame.reg_save_offset),
10622 else if (m->fs.sp_offset != frame.reg_save_offset)
10624 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10625 GEN_INT (m->fs.sp_offset
10626 - frame.reg_save_offset),
10628 m->fs.cfa_reg == stack_pointer_rtx);
10631 ix86_emit_restore_regs_using_pop ();
  /* If we used a frame pointer and haven't already got rid of it,
     then do so now.  */
10636 if (m->fs.fp_valid)
10638 /* If the stack pointer is valid and pointing at the frame
10639 pointer store address, then we only need a pop. */
10640 if (m->fs.sp_valid && m->fs.sp_offset == frame.hfp_save_offset)
10641 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
10642 /* Leave results in shorter dependency chains on CPUs that are
10643 able to grok it fast. */
10644 else if (TARGET_USE_LEAVE
10645 || optimize_function_for_size_p (cfun)
10646 || !cfun->machine->use_fast_prologue_epilogue)
10647 ix86_emit_leave ();
10650 pro_epilogue_adjust_stack (stack_pointer_rtx,
10651 hard_frame_pointer_rtx,
10652 const0_rtx, style, !using_drap);
10653 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx);
10659 int param_ptr_offset = UNITS_PER_WORD;
10662 gcc_assert (stack_realign_drap);
10664 if (ix86_static_chain_on_stack)
10665 param_ptr_offset += UNITS_PER_WORD;
10666 if (!call_used_regs[REGNO (crtl->drap_reg)])
10667 param_ptr_offset += UNITS_PER_WORD;
10669 insn = emit_insn (gen_rtx_SET
10670 (VOIDmode, stack_pointer_rtx,
10671 gen_rtx_PLUS (Pmode,
10673 GEN_INT (-param_ptr_offset))));
10674 m->fs.cfa_reg = stack_pointer_rtx;
10675 m->fs.cfa_offset = param_ptr_offset;
10676 m->fs.sp_offset = param_ptr_offset;
10677 m->fs.realigned = false;
10679 add_reg_note (insn, REG_CFA_DEF_CFA,
10680 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10681 GEN_INT (param_ptr_offset)));
10682 RTX_FRAME_RELATED_P (insn) = 1;
10684 if (!call_used_regs[REGNO (crtl->drap_reg)])
10685 ix86_emit_restore_reg_using_pop (crtl->drap_reg);
10688 /* At this point the stack pointer must be valid, and we must have
10689 restored all of the registers. We may not have deallocated the
10690 entire stack frame. We've delayed this until now because it may
10691 be possible to merge the local stack deallocation with the
10692 deallocation forced by ix86_static_chain_on_stack. */
10693 gcc_assert (m->fs.sp_valid);
10694 gcc_assert (!m->fs.fp_valid);
10695 gcc_assert (!m->fs.realigned);
10696 if (m->fs.sp_offset != UNITS_PER_WORD)
10698 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10699 GEN_INT (m->fs.sp_offset - UNITS_PER_WORD),
10703 /* Sibcall epilogues don't want a return instruction. */
10706 m->fs = frame_state_save;
10710 /* Emit vzeroupper if needed. */
10711 if (TARGET_VZEROUPPER
10712 && !TREE_THIS_VOLATILE (cfun->decl)
10713 && !cfun->machine->caller_return_avx256_p)
10714 emit_insn (gen_avx_vzeroupper (GEN_INT (call_no_avx256)));
10716 if (crtl->args.pops_args && crtl->args.size)
10718 rtx popc = GEN_INT (crtl->args.pops_args);
10720 /* i386 can only pop 64K bytes. If asked to pop more, pop return
10721 address, do explicit add, and jump indirectly to the caller. */
10723 if (crtl->args.pops_args >= 65536)
10725 rtx ecx = gen_rtx_REG (SImode, CX_REG);
10728 /* There is no "pascal" calling convention in any 64bit ABI. */
10729 gcc_assert (!TARGET_64BIT);
10731 insn = emit_insn (gen_pop (ecx));
10732 m->fs.cfa_offset -= UNITS_PER_WORD;
10733 m->fs.sp_offset -= UNITS_PER_WORD;
10735 add_reg_note (insn, REG_CFA_ADJUST_CFA,
10736 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
10737 add_reg_note (insn, REG_CFA_REGISTER,
10738 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
10739 RTX_FRAME_RELATED_P (insn) = 1;
10741 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
10743 emit_jump_insn (gen_return_indirect_internal (ecx));
10746 emit_jump_insn (gen_return_pop_internal (popc));
10749 emit_jump_insn (gen_return_internal ());
10751 /* Restore the state back to the state from the prologue,
10752 so that it's correct for the next epilogue. */
10753 m->fs = frame_state_save;
10756 /* Reset from the function's potential modifications. */
10759 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10760 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
10762 if (pic_offset_table_rtx)
10763 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
10765 /* Mach-O doesn't support labels at the end of objects, so if
10766 it looks like we might want one, insert a NOP. */
10768 rtx insn = get_last_insn ();
10771 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
10772 insn = PREV_INSN (insn);
10776 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
10777 fputs ("\tnop\n", file);
10783 /* Return a scratch register to use in the split stack prologue. The
   split-stack prologue is used for -fsplit-stack.  These are the first
10785 instructions in the function, even before the regular prologue.
10786 The scratch register can be any caller-saved register which is not
10787 used for parameters or for the static chain. */
10789 static unsigned int
10790 split_stack_prologue_scratch_regno (void)
10799 is_fastcall = (lookup_attribute ("fastcall",
10800 TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
10802 regparm = ix86_function_regparm (TREE_TYPE (cfun->decl), cfun->decl);
10806 if (DECL_STATIC_CHAIN (cfun->decl))
10808 sorry ("-fsplit-stack does not support fastcall with "
10809 "nested function");
10810 return INVALID_REGNUM;
10814 else if (regparm < 3)
10816 if (!DECL_STATIC_CHAIN (cfun->decl))
10822 sorry ("-fsplit-stack does not support 2 register "
10823 " parameters for a nested function");
10824 return INVALID_REGNUM;
10831 /* FIXME: We could make this work by pushing a register
10832 around the addition and comparison. */
10833 sorry ("-fsplit-stack does not support 3 register parameters");
10834 return INVALID_REGNUM;
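/* Roughly: 64-bit code can simply use %r11, while 32-bit code picks
   %eax, %ecx or %edx depending on how many of them are already claimed
   by fastcall/regparm parameters and the static chain, and gives up
   with a "sorry" when none is free.  (Summary for illustration; see the
   cases above.)  */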
/* A SYMBOL_REF for the function which allocates new stack space for
   split stack.  */
10842 static GTY(()) rtx split_stack_fn;
/* A SYMBOL_REF for the more-stack function when using the large
   model.  */
10847 static GTY(()) rtx split_stack_fn_large;
10849 /* Handle -fsplit-stack. These are the first instructions in the
10850 function, even before the regular prologue. */
10853 ix86_expand_split_stack_prologue (void)
10855 struct ix86_frame frame;
10856 HOST_WIDE_INT allocate;
10857 unsigned HOST_WIDE_INT args_size;
10858 rtx label, limit, current, jump_insn, allocate_rtx, call_insn, call_fusage;
10859 rtx scratch_reg = NULL_RTX;
10860 rtx varargs_label = NULL_RTX;
10863 gcc_assert (flag_split_stack && reload_completed);
10865 ix86_finalize_stack_realign_flags ();
10866 ix86_compute_frame_layout (&frame);
10867 allocate = frame.stack_pointer_offset - INCOMING_FRAME_SP_OFFSET;
10869 /* This is the label we will branch to if we have enough stack
10870 space. We expect the basic block reordering pass to reverse this
10871 branch if optimizing, so that we branch in the unlikely case. */
10872 label = gen_label_rtx ();
10874 /* We need to compare the stack pointer minus the frame size with
10875 the stack boundary in the TCB. The stack boundary always gives
10876 us SPLIT_STACK_AVAILABLE bytes, so if we need less than that we
10877 can compare directly. Otherwise we need to do an addition. */
10879 limit = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
10880 UNSPEC_STACK_CHECK);
10881 limit = gen_rtx_CONST (Pmode, limit);
10882 limit = gen_rtx_MEM (Pmode, limit);
10883 if (allocate < SPLIT_STACK_AVAILABLE)
10884 current = stack_pointer_rtx;
10887 unsigned int scratch_regno;
10890 /* We need a scratch register to hold the stack pointer minus
10891 the required frame size. Since this is the very start of the
10892 function, the scratch register can be any caller-saved
10893 register which is not used for parameters. */
10894 offset = GEN_INT (- allocate);
10895 scratch_regno = split_stack_prologue_scratch_regno ();
10896 if (scratch_regno == INVALID_REGNUM)
10898 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
10899 if (!TARGET_64BIT || x86_64_immediate_operand (offset, Pmode))
10901 /* We don't use ix86_gen_add3 in this case because it will
10902 want to split to lea, but when not optimizing the insn
10903 will not be split after this point. */
10904 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
10905 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10910 emit_move_insn (scratch_reg, offset);
10911 emit_insn (gen_adddi3 (scratch_reg, scratch_reg,
10912 stack_pointer_rtx));
10914 current = scratch_reg;
10917 ix86_expand_branch (GEU, current, limit, label);
10918 jump_insn = get_last_insn ();
10919 JUMP_LABEL (jump_insn) = label;
10921 /* Mark the jump as very likely to be taken. */
10922 add_reg_note (jump_insn, REG_BR_PROB,
10923 GEN_INT (REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100));
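  /* REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100 corresponds to a 99%
     probability of the branch being taken, i.e. the __morestack call
     below is treated as the cold path.  */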
10925 if (split_stack_fn == NULL_RTX)
10926 split_stack_fn = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
10927 fn = split_stack_fn;
10929 /* Get more stack space. We pass in the desired stack space and the
10930 size of the arguments to copy to the new stack. In 32-bit mode
10931 we push the parameters; __morestack will return on a new stack
   anyhow.  In 64-bit mode we pass the parameters in r10 and r11.  */
10934 allocate_rtx = GEN_INT (allocate);
10935 args_size = crtl->args.size >= 0 ? crtl->args.size : 0;
10936 call_fusage = NULL_RTX;
10941 reg10 = gen_rtx_REG (Pmode, R10_REG);
10942 reg11 = gen_rtx_REG (Pmode, R11_REG);
10944 /* If this function uses a static chain, it will be in %r10.
10945 Preserve it across the call to __morestack. */
10946 if (DECL_STATIC_CHAIN (cfun->decl))
10950 rax = gen_rtx_REG (Pmode, AX_REG);
10951 emit_move_insn (rax, reg10);
10952 use_reg (&call_fusage, rax);
10955 if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
10957 HOST_WIDE_INT argval;
10959 /* When using the large model we need to load the address
10960 into a register, and we've run out of registers. So we
10961 switch to a different calling convention, and we call a
10962 different function: __morestack_large. We pass the
10963 argument size in the upper 32 bits of r10 and pass the
10964 frame size in the lower 32 bits. */
10965 gcc_assert ((allocate & (HOST_WIDE_INT) 0xffffffff) == allocate);
10966 gcc_assert ((args_size & 0xffffffff) == args_size);
10968 if (split_stack_fn_large == NULL_RTX)
10969 split_stack_fn_large =
10970 gen_rtx_SYMBOL_REF (Pmode, "__morestack_large_model");
10972 if (ix86_cmodel == CM_LARGE_PIC)
10976 label = gen_label_rtx ();
10977 emit_label (label);
10978 LABEL_PRESERVE_P (label) = 1;
10979 emit_insn (gen_set_rip_rex64 (reg10, label));
10980 emit_insn (gen_set_got_offset_rex64 (reg11, label));
10981 emit_insn (gen_adddi3 (reg10, reg10, reg11));
10982 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, split_stack_fn_large),
10984 x = gen_rtx_CONST (Pmode, x);
10985 emit_move_insn (reg11, x);
10986 x = gen_rtx_PLUS (Pmode, reg10, reg11);
10987 x = gen_const_mem (Pmode, x);
10988 emit_move_insn (reg11, x);
10991 emit_move_insn (reg11, split_stack_fn_large);
10995 argval = ((args_size << 16) << 16) + allocate;
10996 emit_move_insn (reg10, GEN_INT (argval));
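	  /* E.g. (values assumed for illustration) args_size == 0x30 and
	     allocate == 0x1000 load 0x0000003000001000 into %r10.  The
	     double shift by 16, rather than a single shift by 32, keeps
	     the expression well defined even if HOST_WIDE_INT were only
	     32 bits wide.  */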
11000 emit_move_insn (reg10, allocate_rtx);
11001 emit_move_insn (reg11, GEN_INT (args_size));
11002 use_reg (&call_fusage, reg11);
11005 use_reg (&call_fusage, reg10);
11009 emit_insn (gen_push (GEN_INT (args_size)));
11010 emit_insn (gen_push (allocate_rtx));
11012 call_insn = ix86_expand_call (NULL_RTX, gen_rtx_MEM (QImode, fn),
11013 GEN_INT (UNITS_PER_WORD), constm1_rtx,
11015 add_function_usage_to (call_insn, call_fusage);
11017 /* In order to make call/return prediction work right, we now need
11018 to execute a return instruction. See
11019 libgcc/config/i386/morestack.S for the details on how this works.
11021 For flow purposes gcc must not see this as a return
11022 instruction--we need control flow to continue at the subsequent
11023 label. Therefore, we use an unspec. */
11024 gcc_assert (crtl->args.pops_args < 65536);
11025 emit_insn (gen_split_stack_return (GEN_INT (crtl->args.pops_args)));
11027 /* If we are in 64-bit mode and this function uses a static chain,
     we saved %r10 in %rax before calling __morestack.  */
11029 if (TARGET_64BIT && DECL_STATIC_CHAIN (cfun->decl))
11030 emit_move_insn (gen_rtx_REG (Pmode, R10_REG),
11031 gen_rtx_REG (Pmode, AX_REG));
11033 /* If this function calls va_start, we need to store a pointer to
11034 the arguments on the old stack, because they may not have been
11035 all copied to the new stack. At this point the old stack can be
11036 found at the frame pointer value used by __morestack, because
11037 __morestack has set that up before calling back to us. Here we
11038 store that pointer in a scratch register, and in
11039 ix86_expand_prologue we store the scratch register in a stack
11041 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11043 unsigned int scratch_regno;
11047 scratch_regno = split_stack_prologue_scratch_regno ();
11048 scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
11049 frame_reg = gen_rtx_REG (Pmode, BP_REG);
      /* 64-bit:
	 fp -> old fp value
	       return address within this function
	       return address of caller of this function
	       stack arguments
	 So we add three words to get to the stack arguments.

	 32-bit:
	 fp -> old fp value
	       return address within this function
	       first argument to __morestack
	       second argument to __morestack
	       return address of caller of this function
	       stack arguments
	 So we add five words to get to the stack arguments.  */
11067 words = TARGET_64BIT ? 3 : 5;
11068 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11069 gen_rtx_PLUS (Pmode, frame_reg,
11070 GEN_INT (words * UNITS_PER_WORD))));
11072 varargs_label = gen_label_rtx ();
11073 emit_jump_insn (gen_jump (varargs_label));
11074 JUMP_LABEL (get_last_insn ()) = varargs_label;
11079 emit_label (label);
11080 LABEL_NUSES (label) = 1;
11082 /* If this function calls va_start, we now have to set the scratch
11083 register for the case where we do not call __morestack. In this
11084 case we need to set it based on the stack pointer. */
11085 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11087 emit_insn (gen_rtx_SET (VOIDmode, scratch_reg,
11088 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11089 GEN_INT (UNITS_PER_WORD))));
11091 emit_label (varargs_label);
11092 LABEL_NUSES (varargs_label) = 1;
11096 /* We may have to tell the dataflow pass that the split stack prologue
11097 is initializing a scratch register. */
11100 ix86_live_on_entry (bitmap regs)
11102 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11104 gcc_assert (flag_split_stack);
11105 bitmap_set_bit (regs, split_stack_prologue_scratch_regno ());
/* Determine if OP is a suitable SUBREG RTX for an address.  */
11112 ix86_address_subreg_operand (rtx op)
11114 enum machine_mode mode;
11119 mode = GET_MODE (op);
11121 if (GET_MODE_CLASS (mode) != MODE_INT)
11124 /* Don't allow SUBREGs that span more than a word. It can lead to spill
11125 failures when the register is one word out of a two word structure. */
11126 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
11129 /* Allow only SUBREGs of non-eliminable hard registers. */
11130 return register_no_elim_operand (op, mode);
11133 /* Extract the parts of an RTL expression that is a valid memory address
11134 for an instruction. Return 0 if the structure of the address is
11135 grossly off. Return -1 if the address contains ASHIFT, so it is not
   strictly valid, but still used for computing the length of an lea
   instruction.  */
11139 ix86_decompose_address (rtx addr, struct ix86_address *out)
11141 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
11142 rtx base_reg, index_reg;
11143 HOST_WIDE_INT scale = 1;
11144 rtx scale_rtx = NULL_RTX;
11147 enum ix86_address_seg seg = SEG_DEFAULT;
11151 else if (GET_CODE (addr) == SUBREG)
11153 if (ix86_address_subreg_operand (SUBREG_REG (addr)))
11158 else if (GET_CODE (addr) == PLUS)
11160 rtx addends[4], op;
11168 addends[n++] = XEXP (op, 1);
11171 while (GET_CODE (op) == PLUS);
11176 for (i = n; i >= 0; --i)
11179 switch (GET_CODE (op))
11184 index = XEXP (op, 0);
11185 scale_rtx = XEXP (op, 1);
11191 index = XEXP (op, 0);
11192 tmp = XEXP (op, 1);
11193 if (!CONST_INT_P (tmp))
11195 scale = INTVAL (tmp);
11196 if ((unsigned HOST_WIDE_INT) scale > 3)
11198 scale = 1 << scale;
11202 if (XINT (op, 1) == UNSPEC_TP
11203 && TARGET_TLS_DIRECT_SEG_REFS
11204 && seg == SEG_DEFAULT)
11205 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
11211 if (!ix86_address_subreg_operand (SUBREG_REG (op)))
11238 else if (GET_CODE (addr) == MULT)
11240 index = XEXP (addr, 0); /* index*scale */
11241 scale_rtx = XEXP (addr, 1);
11243 else if (GET_CODE (addr) == ASHIFT)
11245 /* We're called for lea too, which implements ashift on occasion. */
11246 index = XEXP (addr, 0);
11247 tmp = XEXP (addr, 1);
11248 if (!CONST_INT_P (tmp))
11250 scale = INTVAL (tmp);
11251 if ((unsigned HOST_WIDE_INT) scale > 3)
11253 scale = 1 << scale;
11257 disp = addr; /* displacement */
11263 else if (GET_CODE (index) == SUBREG
11264 && ix86_address_subreg_operand (SUBREG_REG (index)))
11270 /* Extract the integral value of scale. */
11273 if (!CONST_INT_P (scale_rtx))
11275 scale = INTVAL (scale_rtx);
11278 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
11279 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
11281 /* Avoid useless 0 displacement. */
11282 if (disp == const0_rtx && (base || index))
  /* Allow arg pointer and stack pointer as index if there is no scaling.  */
11286 if (base_reg && index_reg && scale == 1
11287 && (index_reg == arg_pointer_rtx
11288 || index_reg == frame_pointer_rtx
11289 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
11292 tmp = base, base = index, index = tmp;
11293 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
  /* Special case: %ebp cannot be encoded as a base without a displacement.
     Similarly %r13.  */
11300 && (base_reg == hard_frame_pointer_rtx
11301 || base_reg == frame_pointer_rtx
11302 || base_reg == arg_pointer_rtx
11303 || (REG_P (base_reg)
11304 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
11305 || REGNO (base_reg) == R13_REG))))
11308 /* Special case: on K6, [%esi] makes the instruction vector decoded.
11309 Avoid this by transforming to [%esi+0].
11310 Reload calls address legitimization without cfun defined, so we need
11311 to test cfun for being non-NULL. */
11312 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
11313 && base_reg && !index_reg && !disp
11314 && REG_P (base_reg) && REGNO (base_reg) == SI_REG)
11317 /* Special case: encode reg+reg instead of reg*2. */
11318 if (!base && index && scale == 2)
11319 base = index, base_reg = index_reg, scale = 1;
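  /* For illustration: in the SIB encoding an index without a base
     register always carries a 32-bit displacement, so
     "lea (%eax,%eax), %edx" (3 bytes) beats the equivalent
     "lea 0(,%eax,2), %edx" (7 bytes).  The same rule is why a scaled
     index with neither base nor displacement, handled just below,
     needs an explicit zero displacement.  */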
11321 /* Special case: scaling cannot be encoded without base or displacement. */
11322 if (!base && !disp && index && scale != 1)
11326 out->index = index;
11328 out->scale = scale;
11334 /* Return cost of the memory address x.
11335 For i386, it is better to use a complex address than let gcc copy
11336 the address into a reg and make a new pseudo. But not if the address
   requires two regs - that would mean more pseudos with longer
   lifetimes.  */
11340 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
11342 struct ix86_address parts;
11344 int ok = ix86_decompose_address (x, &parts);
11348 if (parts.base && GET_CODE (parts.base) == SUBREG)
11349 parts.base = SUBREG_REG (parts.base);
11350 if (parts.index && GET_CODE (parts.index) == SUBREG)
11351 parts.index = SUBREG_REG (parts.index);
11353 /* Attempt to minimize number of registers in the address. */
11355 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
11357 && (!REG_P (parts.index)
11358 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
11362 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
11364 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
11365 && parts.base != parts.index)
  /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
     since its predecode logic can't detect the length of instructions
11370 and it degenerates to vector decoded. Increase cost of such
11371 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
11372 to split such addresses or even refuse such addresses at all.
     The following addressing modes are affected:
	[base+scale*index]
	[scale*index+disp]
	[base+index]

     The first and last case may be avoidable by explicitly coding the
     zero into the memory address, but I don't have an AMD-K6 machine
     handy to check this theory.  */
11384 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
11385 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
11386 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
   this is used to form addresses to local data when -fPIC is in
   effect.  */
11397 darwin_local_data_pic (rtx disp)
11399 return (GET_CODE (disp) == UNSPEC
11400 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
11403 /* Determine if a given RTX is a valid constant. We already know this
11404 satisfies CONSTANT_P. */
11407 ix86_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
11409 switch (GET_CODE (x))
11414 if (GET_CODE (x) == PLUS)
11416 if (!CONST_INT_P (XEXP (x, 1)))
11421 if (TARGET_MACHO && darwin_local_data_pic (x))
11424 /* Only some unspecs are valid as "constants". */
11425 if (GET_CODE (x) == UNSPEC)
11426 switch (XINT (x, 1))
11429 case UNSPEC_GOTOFF:
11430 case UNSPEC_PLTOFF:
11431 return TARGET_64BIT;
11433 case UNSPEC_NTPOFF:
11434 x = XVECEXP (x, 0, 0);
11435 return (GET_CODE (x) == SYMBOL_REF
11436 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
11437 case UNSPEC_DTPOFF:
11438 x = XVECEXP (x, 0, 0);
11439 return (GET_CODE (x) == SYMBOL_REF
11440 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
11445 /* We must have drilled down to a symbol. */
11446 if (GET_CODE (x) == LABEL_REF)
11448 if (GET_CODE (x) != SYMBOL_REF)
11453 /* TLS symbols are never valid. */
11454 if (SYMBOL_REF_TLS_MODEL (x))
11457 /* DLLIMPORT symbols are never valid. */
11458 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
11459 && SYMBOL_REF_DLLIMPORT_P (x))
11463 /* mdynamic-no-pic */
11464 if (MACHO_DYNAMIC_NO_PIC_P)
11465 return machopic_symbol_defined_p (x);
11470 if (GET_MODE (x) == TImode
11471 && x != CONST0_RTX (TImode)
11477 if (!standard_sse_constant_p (x))
11484 /* Otherwise we handle everything else in the move patterns. */
11488 /* Determine if it's legal to put X into the constant pool. This
11489 is not possible for the address of thread-local symbols, which
11490 is checked above. */
11493 ix86_cannot_force_const_mem (enum machine_mode mode, rtx x)
11495 /* We can always put integral constants and vectors in memory. */
11496 switch (GET_CODE (x))
11506 return !ix86_legitimate_constant_p (mode, x);
11510 /* Nonzero if the constant value X is a legitimate general operand
11511 when generating PIC code. It is given that flag_pic is on and
11512 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
11515 legitimate_pic_operand_p (rtx x)
11519 switch (GET_CODE (x))
11522 inner = XEXP (x, 0);
11523 if (GET_CODE (inner) == PLUS
11524 && CONST_INT_P (XEXP (inner, 1)))
11525 inner = XEXP (inner, 0);
11527 /* Only some unspecs are valid as "constants". */
11528 if (GET_CODE (inner) == UNSPEC)
11529 switch (XINT (inner, 1))
11532 case UNSPEC_GOTOFF:
11533 case UNSPEC_PLTOFF:
11534 return TARGET_64BIT;
11536 x = XVECEXP (inner, 0, 0);
11537 return (GET_CODE (x) == SYMBOL_REF
11538 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
11539 case UNSPEC_MACHOPIC_OFFSET:
11540 return legitimate_pic_address_disp_p (x);
11548 return legitimate_pic_address_disp_p (x);
/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */
11559 legitimate_pic_address_disp_p (rtx disp)
11563 /* In 64bit mode we can allow direct addresses of symbols and labels
11564 when they are not dynamic symbols. */
11567 rtx op0 = disp, op1;
11569 switch (GET_CODE (disp))
11575 if (GET_CODE (XEXP (disp, 0)) != PLUS)
11577 op0 = XEXP (XEXP (disp, 0), 0);
11578 op1 = XEXP (XEXP (disp, 0), 1);
11579 if (!CONST_INT_P (op1)
11580 || INTVAL (op1) >= 16*1024*1024
11581 || INTVAL (op1) < -16*1024*1024)
11583 if (GET_CODE (op0) == LABEL_REF)
11585 if (GET_CODE (op0) != SYMBOL_REF)
11590 /* TLS references should always be enclosed in UNSPEC. */
11591 if (SYMBOL_REF_TLS_MODEL (op0))
11593 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
11594 && ix86_cmodel != CM_LARGE_PIC)
11602 if (GET_CODE (disp) != CONST)
11604 disp = XEXP (disp, 0);
  /* It is unsafe to allow PLUS expressions here; this limits the
     allowed displacement of GOT references.  We should not need
     these anyway.  */
11610 if (GET_CODE (disp) != UNSPEC
11611 || (XINT (disp, 1) != UNSPEC_GOTPCREL
11612 && XINT (disp, 1) != UNSPEC_GOTOFF
11613 && XINT (disp, 1) != UNSPEC_PCREL
11614 && XINT (disp, 1) != UNSPEC_PLTOFF))
11617 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
11618 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
11624 if (GET_CODE (disp) == PLUS)
11626 if (!CONST_INT_P (XEXP (disp, 1)))
11628 disp = XEXP (disp, 0);
11632 if (TARGET_MACHO && darwin_local_data_pic (disp))
11635 if (GET_CODE (disp) != UNSPEC)
11638 switch (XINT (disp, 1))
11643 /* We need to check for both symbols and labels because VxWorks loads
     text labels with @GOT rather than @GOTOFF.  See gotoff_operand for
     details.  */
11646 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
11647 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
11648 case UNSPEC_GOTOFF:
      /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
	 While the ABI also specifies a 32bit relocation, we don't produce
	 it in the small PIC model at all.  */
11652 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
11653 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
11655 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
11657 case UNSPEC_GOTTPOFF:
11658 case UNSPEC_GOTNTPOFF:
11659 case UNSPEC_INDNTPOFF:
11662 disp = XVECEXP (disp, 0, 0);
11663 return (GET_CODE (disp) == SYMBOL_REF
11664 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
11665 case UNSPEC_NTPOFF:
11666 disp = XVECEXP (disp, 0, 0);
11667 return (GET_CODE (disp) == SYMBOL_REF
11668 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
11669 case UNSPEC_DTPOFF:
11670 disp = XVECEXP (disp, 0, 0);
11671 return (GET_CODE (disp) == SYMBOL_REF
11672 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
11678 /* Recognizes RTL expressions that are valid memory addresses for an
11679 instruction. The MODE argument is the machine mode for the MEM
11680 expression that wants to use this address.
11682 It only recognizes address in canonical form. LEGITIMIZE_ADDRESS should
   convert common non-canonical forms to canonical form so that they will
   be recognized.  */
11687 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
11688 rtx addr, bool strict)
11690 struct ix86_address parts;
11691 rtx base, index, disp;
11692 HOST_WIDE_INT scale;
11694 if (ix86_decompose_address (addr, &parts) <= 0)
11695 /* Decomposition failed. */
11699 index = parts.index;
11701 scale = parts.scale;
11703 /* Validate base register. */
11710 else if (GET_CODE (base) == SUBREG && REG_P (SUBREG_REG (base)))
11711 reg = SUBREG_REG (base);
11713 /* Base is not a register. */
11716 if (GET_MODE (base) != SImode && GET_MODE (base) != DImode)
11719 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
11720 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
11721 /* Base is not valid. */
11725 /* Validate index register. */
11732 else if (GET_CODE (index) == SUBREG && REG_P (SUBREG_REG (index)))
11733 reg = SUBREG_REG (index);
11735 /* Index is not a register. */
11738 if (GET_MODE (index) != SImode && GET_MODE (index) != DImode)
11741 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
11742 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
11743 /* Index is not valid. */
11747 /* Index and base should have the same mode. */
11749 && GET_MODE (base) != GET_MODE (index))
11752 /* Validate scale factor. */
11756 /* Scale without index. */
11759 if (scale != 2 && scale != 4 && scale != 8)
11760 /* Scale is not a valid multiplier. */
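	/* (The SIB byte has only two bits for the scale factor, hence
	   exactly 1, 2, 4 and 8; e.g. index*3 is not encodable and must
	   be written as index+index*2 instead.)  */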
11764 /* Validate displacement. */
11767 if (GET_CODE (disp) == CONST
11768 && GET_CODE (XEXP (disp, 0)) == UNSPEC
11769 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
11770 switch (XINT (XEXP (disp, 0), 1))
	  /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit
	     when used.  While the ABI also specifies 32bit relocations, we
	     don't produce them at all and use IP-relative addressing
	     instead.  */
11776 case UNSPEC_GOTOFF:
11777 gcc_assert (flag_pic);
11779 goto is_legitimate_pic;
11781 /* 64bit address unspec. */
11784 case UNSPEC_GOTPCREL:
11786 gcc_assert (flag_pic);
11787 goto is_legitimate_pic;
11789 case UNSPEC_GOTTPOFF:
11790 case UNSPEC_GOTNTPOFF:
11791 case UNSPEC_INDNTPOFF:
11792 case UNSPEC_NTPOFF:
11793 case UNSPEC_DTPOFF:
11796 case UNSPEC_STACK_CHECK:
11797 gcc_assert (flag_split_stack);
11801 /* Invalid address unspec. */
11805 else if (SYMBOLIC_CONST (disp)
11809 && MACHOPIC_INDIRECT
11810 && !machopic_operand_p (disp)
11816 if (TARGET_64BIT && (index || base))
11818 /* foo@dtpoff(%rX) is ok. */
11819 if (GET_CODE (disp) != CONST
11820 || GET_CODE (XEXP (disp, 0)) != PLUS
11821 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
11822 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
11823 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
11824 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
11825 /* Non-constant pic memory reference. */
11828 else if ((!TARGET_MACHO || flag_pic)
11829 && ! legitimate_pic_address_disp_p (disp))
11830 /* Displacement is an invalid pic construct. */
11833 else if (MACHO_DYNAMIC_NO_PIC_P
11834 && !ix86_legitimate_constant_p (Pmode, disp))
	/* displacement must be referenced via non_lazy_pointer */
11839 /* This code used to verify that a symbolic pic displacement
11840 includes the pic_offset_table_rtx register.
     While this is a good idea, unfortunately these constructs may
     be created by the "adds using lea" optimization for incorrect
     code like:

     int a;
     int foo (int i)
       {
	 return *(&a + i);
       }
11852 This code is nonsensical, but results in addressing
11853 GOT table with pic_offset_table_rtx base. We can't
     just refuse it easily, since it gets matched by the "addsi3"
     pattern, which later gets split to lea when the output register
     differs from the input.  While this could be handled by a separate
     addsi pattern for this case that never results in lea, disabling
     this test seems to be the easier and correct fix for the crash.  */
11861 else if (GET_CODE (disp) != LABEL_REF
11862 && !CONST_INT_P (disp)
11863 && (GET_CODE (disp) != CONST
11864 || !ix86_legitimate_constant_p (Pmode, disp))
11865 && (GET_CODE (disp) != SYMBOL_REF
11866 || !ix86_legitimate_constant_p (Pmode, disp)))
11867 /* Displacement is not constant. */
11869 else if (TARGET_64BIT
11870 && !x86_64_immediate_operand (disp, VOIDmode))
11871 /* Displacement is out of range. */
11875 /* Everything looks valid. */
11879 /* Determine if a given RTX is a valid constant address. */
11882 constant_address_p (rtx x)
11884 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
11887 /* Return a unique alias set for the GOT. */
11889 static alias_set_type
11890 ix86_GOT_alias_set (void)
11892 static alias_set_type set = -1;
11894 set = new_alias_set ();
11898 /* Return a legitimate reference for ORIG (an address) using the
11899 register REG. If REG is 0, a new pseudo is generated.
11901 There are two types of references that must be handled:
11903 1. Global data references must load the address from the GOT, via
11904 the PIC reg. An insn is emitted to do this load, and the reg is
11907 2. Static data references, constant pool addresses, and code labels
11908 compute the address as an offset from the GOT, whose base is in
11909 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
11910 differentiate them from global data objects. The returned
11911 address is the PIC reg + an unspec constant.
11913 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
11914 reg also appears in the address. */
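/* As a rough illustration for a 32-bit ELF target (assembly is
   schematic only), the two cases above typically materialize as:

     movl  foo@GOT(%ebx), %eax     # case 1: load the address from the GOT
     leal  bar@GOTOFF(%ebx), %eax  # case 2: offset from the PIC base

   where %ebx holds the PIC register set up in the prologue.  */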
11917 legitimize_pic_address (rtx orig, rtx reg)
11920 rtx new_rtx = orig;
11924 if (TARGET_MACHO && !TARGET_64BIT)
11927 reg = gen_reg_rtx (Pmode);
11928 /* Use the generic Mach-O PIC machinery. */
11929 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
11933 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
11935 else if (TARGET_64BIT
11936 && ix86_cmodel != CM_SMALL_PIC
11937 && gotoff_operand (addr, Pmode))
11940 /* This symbol may be referenced via a displacement from the PIC
11941 base address (@GOTOFF). */
11943 if (reload_in_progress)
11944 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
11945 if (GET_CODE (addr) == CONST)
11946 addr = XEXP (addr, 0);
11947 if (GET_CODE (addr) == PLUS)
11949 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
11951 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
11954 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
11955 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
11957 tmpreg = gen_reg_rtx (Pmode);
11960 emit_move_insn (tmpreg, new_rtx);
11964 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
11965 tmpreg, 1, OPTAB_DIRECT);
11968 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
11970 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
11972 /* This symbol may be referenced via a displacement from the PIC
11973 base address (@GOTOFF). */
11975 if (reload_in_progress)
11976 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
11977 if (GET_CODE (addr) == CONST)
11978 addr = XEXP (addr, 0);
11979 if (GET_CODE (addr) == PLUS)
11981 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
11983 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
11986 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
11987 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
11988 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
11992 emit_move_insn (reg, new_rtx);
11996 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
11997 /* We can't use @GOTOFF for text labels on VxWorks;
11998 see gotoff_operand. */
11999 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
12001 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12003 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
12004 return legitimize_dllimport_symbol (addr, true);
12005 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
12006 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
12007 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
12009 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
12010 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
	  /* For x64 PE-COFF there is no GOT table, so we use the address
	     directly.  */
12016 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
12018 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_PCREL);
12019 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12022 reg = gen_reg_rtx (Pmode);
12023 emit_move_insn (reg, new_rtx);
12026 else if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
12028 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
12029 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12030 new_rtx = gen_const_mem (Pmode, new_rtx);
12031 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12034 reg = gen_reg_rtx (Pmode);
	      /* Use gen_movsi directly; otherwise the address is loaded
		 into a register for CSE.  We don't want to CSE these
		 addresses; instead we CSE addresses from the GOT table,
		 so skip this.  */
12038 emit_insn (gen_movsi (reg, new_rtx));
12043 /* This symbol must be referenced via a load from the
12044 Global Offset Table (@GOT). */
12046 if (reload_in_progress)
12047 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12048 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
12049 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12051 new_rtx = force_reg (Pmode, new_rtx);
12052 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12053 new_rtx = gen_const_mem (Pmode, new_rtx);
12054 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
12057 reg = gen_reg_rtx (Pmode);
12058 emit_move_insn (reg, new_rtx);
12064 if (CONST_INT_P (addr)
12065 && !x86_64_immediate_operand (addr, VOIDmode))
12069 emit_move_insn (reg, addr);
12073 new_rtx = force_reg (Pmode, addr);
12075 else if (GET_CODE (addr) == CONST)
12077 addr = XEXP (addr, 0);
12079 /* We must match stuff we generate before. Assume the only
12080 unspecs that can get here are ours. Not that we could do
12081 anything with them anyway.... */
12082 if (GET_CODE (addr) == UNSPEC
12083 || (GET_CODE (addr) == PLUS
12084 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
12086 gcc_assert (GET_CODE (addr) == PLUS);
12088 if (GET_CODE (addr) == PLUS)
12090 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
12092 /* Check first to see if this is a constant offset from a @GOTOFF
12093 symbol reference. */
12094 if (gotoff_operand (op0, Pmode)
12095 && CONST_INT_P (op1))
12099 if (reload_in_progress)
12100 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12101 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
12103 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
12104 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
12105 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
12109 emit_move_insn (reg, new_rtx);
12115 if (INTVAL (op1) < -16*1024*1024
12116 || INTVAL (op1) >= 16*1024*1024)
12118 if (!x86_64_immediate_operand (op1, Pmode))
12119 op1 = force_reg (Pmode, op1);
12120 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
12126 base = legitimize_pic_address (XEXP (addr, 0), reg);
12127 new_rtx = legitimize_pic_address (XEXP (addr, 1),
12128 base == reg ? NULL_RTX : reg);
12130 if (CONST_INT_P (new_rtx))
12131 new_rtx = plus_constant (base, INTVAL (new_rtx));
12134 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
12136 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
12137 new_rtx = XEXP (new_rtx, 1);
12139 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
12147 /* Load the thread pointer. If TO_REG is true, force it into a register. */
12150 get_thread_pointer (bool to_reg)
12152 rtx tp = gen_rtx_UNSPEC (ptr_mode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
12154 if (GET_MODE (tp) != Pmode)
12155 tp = convert_to_mode (Pmode, tp, 1);
12158 tp = copy_addr_to_reg (tp);
12163 /* Construct the SYMBOL_REF for the tls_get_addr function. */
12165 static GTY(()) rtx ix86_tls_symbol;
12168 ix86_tls_get_addr (void)
12170 if (!ix86_tls_symbol)
12173 = ((TARGET_ANY_GNU_TLS && !TARGET_64BIT)
12174 ? "___tls_get_addr" : "__tls_get_addr");
12176 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, sym);
12179 return ix86_tls_symbol;
12182 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
12184 static GTY(()) rtx ix86_tls_module_base_symbol;
12187 ix86_tls_module_base (void)
12189 if (!ix86_tls_module_base_symbol)
12191 ix86_tls_module_base_symbol
12192 = gen_rtx_SYMBOL_REF (Pmode, "_TLS_MODULE_BASE_");
12194 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
12195 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
12198 return ix86_tls_module_base_symbol;
12201 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
12202 false if we expect this to be used for a memory address and true if
12203 we expect to load the address into a register. */
12206 legitimize_tls_address (rtx x, enum tls_model model, bool for_mov)
12208 rtx dest, base, off;
12209 rtx pic = NULL_RTX, tp = NULL_RTX;
12214 case TLS_MODEL_GLOBAL_DYNAMIC:
12215 dest = gen_reg_rtx (Pmode);
12220 pic = pic_offset_table_rtx;
12223 pic = gen_reg_rtx (Pmode);
12224 emit_insn (gen_set_got (pic));
12228 if (TARGET_GNU2_TLS)
12231 emit_insn (gen_tls_dynamic_gnu2_64 (dest, x));
12233 emit_insn (gen_tls_dynamic_gnu2_32 (dest, x, pic));
12235 tp = get_thread_pointer (true);
12236 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
12238 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
12242 rtx caddr = ix86_tls_get_addr ();
12246 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
12249 emit_call_insn (gen_tls_global_dynamic_64 (rax, x, caddr));
12250 insns = get_insns ();
12253 RTL_CONST_CALL_P (insns) = 1;
12254 emit_libcall_block (insns, dest, rax, x);
12257 emit_insn (gen_tls_global_dynamic_32 (dest, x, pic, caddr));
12261 case TLS_MODEL_LOCAL_DYNAMIC:
12262 base = gen_reg_rtx (Pmode);
12267 pic = pic_offset_table_rtx;
12270 pic = gen_reg_rtx (Pmode);
12271 emit_insn (gen_set_got (pic));
12275 if (TARGET_GNU2_TLS)
12277 rtx tmp = ix86_tls_module_base ();
12280 emit_insn (gen_tls_dynamic_gnu2_64 (base, tmp));
12282 emit_insn (gen_tls_dynamic_gnu2_32 (base, tmp, pic));
12284 tp = get_thread_pointer (true);
12285 set_unique_reg_note (get_last_insn (), REG_EQUIV,
12286 gen_rtx_MINUS (Pmode, tmp, tp));
12290 rtx caddr = ix86_tls_get_addr ();
12294 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, eqv;
12297 emit_call_insn (gen_tls_local_dynamic_base_64 (rax, caddr));
12298 insns = get_insns ();
12301 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
12302 share the LD_BASE result with other LD model accesses. */
12303 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
12304 UNSPEC_TLS_LD_BASE);
12306 RTL_CONST_CALL_P (insns) = 1;
12307 emit_libcall_block (insns, base, rax, eqv);
12310 emit_insn (gen_tls_local_dynamic_base_32 (base, pic, caddr));
12313 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
12314 off = gen_rtx_CONST (Pmode, off);
12316 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
12318 if (TARGET_GNU2_TLS)
12320 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
12322 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
12326 case TLS_MODEL_INITIAL_EXEC:
12329 if (TARGET_SUN_TLS)
12331 /* The Sun linker took the AMD64 TLS spec literally
12332 and can only handle %rax as destination of the
12333 initial executable code sequence. */
12335 dest = gen_reg_rtx (Pmode);
12336 emit_insn (gen_tls_initial_exec_64_sun (dest, x));
12341 type = UNSPEC_GOTNTPOFF;
12345 if (reload_in_progress)
12346 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12347 pic = pic_offset_table_rtx;
12348 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
12350 else if (!TARGET_ANY_GNU_TLS)
12352 pic = gen_reg_rtx (Pmode);
12353 emit_insn (gen_set_got (pic));
12354 type = UNSPEC_GOTTPOFF;
12359 type = UNSPEC_INDNTPOFF;
12362 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
12363 off = gen_rtx_CONST (Pmode, off);
12365 off = gen_rtx_PLUS (Pmode, pic, off);
12366 off = gen_const_mem (Pmode, off);
12367 set_mem_alias_set (off, ix86_GOT_alias_set ());
12369 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12371 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12372 off = force_reg (Pmode, off);
12373 return gen_rtx_PLUS (Pmode, base, off);
12377 base = get_thread_pointer (true);
12378 dest = gen_reg_rtx (Pmode);
12379 emit_insn (gen_subsi3 (dest, base, off));
12383 case TLS_MODEL_LOCAL_EXEC:
12384 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
12385 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12386 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
12387 off = gen_rtx_CONST (Pmode, off);
12389 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
12391 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
12392 return gen_rtx_PLUS (Pmode, base, off);
12396 base = get_thread_pointer (true);
12397 dest = gen_reg_rtx (Pmode);
12398 emit_insn (gen_subsi3 (dest, base, off));
12403 gcc_unreachable ();
12409 /* Create or return the unique __imp_DECL dllimport symbol corresponding
12412 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
12413 htab_t dllimport_map;
12416 get_dllimport_decl (tree decl)
12418 struct tree_map *h, in;
12421 const char *prefix;
12422 size_t namelen, prefixlen;
12427 if (!dllimport_map)
12428 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
12430 in.hash = htab_hash_pointer (decl);
12431 in.base.from = decl;
12432 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
12433 h = (struct tree_map *) *loc;
12437 *loc = h = ggc_alloc_tree_map ();
12439 h->base.from = decl;
12440 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
12441 VAR_DECL, NULL, ptr_type_node);
12442 DECL_ARTIFICIAL (to) = 1;
12443 DECL_IGNORED_P (to) = 1;
12444 DECL_EXTERNAL (to) = 1;
12445 TREE_READONLY (to) = 1;
12447 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
12448 name = targetm.strip_name_encoding (name);
12449 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
12450 ? "*__imp_" : "*__imp__";
12451 namelen = strlen (name);
12452 prefixlen = strlen (prefix);
12453 imp_name = (char *) alloca (namelen + prefixlen + 1);
12454 memcpy (imp_name, prefix, prefixlen);
12455 memcpy (imp_name + prefixlen, name, namelen + 1);
12457 name = ggc_alloc_string (imp_name, namelen + prefixlen);
12458 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
12459 SET_SYMBOL_REF_DECL (rtl, to);
12460 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
12462 rtl = gen_const_mem (Pmode, rtl);
12463 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
12465 SET_DECL_RTL (to, rtl);
12466 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
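/* Example (illustrative): for a 32-bit decl FOO with a "_" user label
   prefix, the import slot built above is named "__imp__foo", and a
   reference to FOO becomes a load through that slot, roughly:

     movl __imp__foo, %eax   ; address of foo, filled in by the loader
     movl (%eax), %edx       ; the datum itself  */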
12471 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
12472 true if we require the result be a register. */
12475 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
12480 gcc_assert (SYMBOL_REF_DECL (symbol));
12481 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
12483 x = DECL_RTL (imp_decl);
12485 x = force_reg (Pmode, x);
12489 /* Try machine-dependent ways of modifying an illegitimate address
12490 to be legitimate. If we find one, return the new, valid address.
12491 This macro is used in only one place: `memory_address' in explow.c.
12493 OLDX is the address as it was before break_out_memory_refs was called.
12494 In some cases it is useful to look at this to decide what needs to be done.
12496 It is always safe for this macro to do nothing. It exists to recognize
12497 opportunities to optimize the output.
12499 For the 80386, we handle X+REG by loading X into a register R and
12500 using R+REG. R will go in a general reg and indexing will be used.
12501 However, if REG is a broken-out memory address or multiplication,
12502 nothing needs to be done because REG can certainly go in a general reg.
12504 When -fpic is used, special handling is needed for symbolic references.
12505 See comments by legitimize_pic_address in i386.c for details. */
12508 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
12509 enum machine_mode mode)
12514 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
12516 return legitimize_tls_address (x, (enum tls_model) log, false);
12517 if (GET_CODE (x) == CONST
12518 && GET_CODE (XEXP (x, 0)) == PLUS
12519 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12520 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
12522 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
12523 (enum tls_model) log, false);
12524 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
12527 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
12529 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
12530 return legitimize_dllimport_symbol (x, true);
12531 if (GET_CODE (x) == CONST
12532 && GET_CODE (XEXP (x, 0)) == PLUS
12533 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
12534 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
12536 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
12537 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
12541 if (flag_pic && SYMBOLIC_CONST (x))
12542 return legitimize_pic_address (x, 0);
12545 if (MACHO_DYNAMIC_NO_PIC_P && SYMBOLIC_CONST (x))
12546 return machopic_indirect_data_reference (x, 0);
12549 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
12550 if (GET_CODE (x) == ASHIFT
12551 && CONST_INT_P (XEXP (x, 1))
12552 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
12555 log = INTVAL (XEXP (x, 1));
12556 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
12557 GEN_INT (1 << log));
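/* E.g. an address computed as (ashift (reg) (const_int 2)) becomes
   (mult (reg) (const_int 4)), which fits the scaled-index slot of the
   machine's base + index*scale + disp addressing mode. */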
12560 if (GET_CODE (x) == PLUS)
12562 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
12564 if (GET_CODE (XEXP (x, 0)) == ASHIFT
12565 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
12566 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
12569 log = INTVAL (XEXP (XEXP (x, 0), 1));
12570 XEXP (x, 0) = gen_rtx_MULT (Pmode,
12571 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
12572 GEN_INT (1 << log));
12575 if (GET_CODE (XEXP (x, 1)) == ASHIFT
12576 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
12577 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
12580 log = INTVAL (XEXP (XEXP (x, 1), 1));
12581 XEXP (x, 1) = gen_rtx_MULT (Pmode,
12582 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
12583 GEN_INT (1 << log));
12586 /* Put multiply first if it isn't already. */
12587 if (GET_CODE (XEXP (x, 1)) == MULT)
12589 rtx tmp = XEXP (x, 0);
12590 XEXP (x, 0) = XEXP (x, 1);
12595 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
12596 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
12597 created by virtual register instantiation, register elimination, and
12598 similar optimizations. */
12599 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
12602 x = gen_rtx_PLUS (Pmode,
12603 gen_rtx_PLUS (Pmode, XEXP (x, 0),
12604 XEXP (XEXP (x, 1), 0)),
12605 XEXP (XEXP (x, 1), 1));
12609 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
12610 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
12611 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
12612 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
12613 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
12614 && CONSTANT_P (XEXP (x, 1)))
12617 rtx other = NULL_RTX;
12619 if (CONST_INT_P (XEXP (x, 1)))
12621 constant = XEXP (x, 1);
12622 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
12624 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
12626 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
12627 other = XEXP (x, 1);
12635 x = gen_rtx_PLUS (Pmode,
12636 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
12637 XEXP (XEXP (XEXP (x, 0), 1), 0)),
12638 plus_constant (other, INTVAL (constant)));
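/* E.g. (plus (mult (reg A) (const_int 4)) (plus (reg B) (const_int 20)))
   becomes (plus (plus (mult (reg A) (const_int 4)) (reg B)) (const_int 20)),
   i.e. the shape that maps onto the 20(B,A,4) addressing mode directly
   (a sketch of the intent). */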
12642 if (changed && ix86_legitimate_address_p (mode, x, false))
12645 if (GET_CODE (XEXP (x, 0)) == MULT)
12648 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
12651 if (GET_CODE (XEXP (x, 1)) == MULT)
12654 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
12658 && REG_P (XEXP (x, 1))
12659 && REG_P (XEXP (x, 0)))
12662 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
12665 x = legitimize_pic_address (x, 0);
12668 if (changed && ix86_legitimate_address_p (mode, x, false))
12671 if (REG_P (XEXP (x, 0)))
12673 rtx temp = gen_reg_rtx (Pmode);
12674 rtx val = force_operand (XEXP (x, 1), temp);
12677 if (GET_MODE (val) != Pmode)
12678 val = convert_to_mode (Pmode, val, 1);
12679 emit_move_insn (temp, val);
12682 XEXP (x, 1) = temp;
12686 else if (REG_P (XEXP (x, 1)))
12688 rtx temp = gen_reg_rtx (Pmode);
12689 rtx val = force_operand (XEXP (x, 0), temp);
12692 if (GET_MODE (val) != Pmode)
12693 val = convert_to_mode (Pmode, val, 1);
12694 emit_move_insn (temp, val);
12697 XEXP (x, 0) = temp;
12705 /* Print an integer constant expression in assembler syntax. Addition
12706 and subtraction are the only arithmetic that may appear in these
12707 expressions. FILE is the stdio stream to write to, X is the rtx, and
12708 CODE is the operand print code from the output string. */
12711 output_pic_addr_const (FILE *file, rtx x, int code)
12715 switch (GET_CODE (x))
12718 gcc_assert (flag_pic);
12723 if (TARGET_64BIT || ! TARGET_MACHO_BRANCH_ISLANDS)
12724 output_addr_const (file, x);
12727 const char *name = XSTR (x, 0);
12729 /* Mark the decl as referenced so that cgraph will
12730 output the function. */
12731 if (SYMBOL_REF_DECL (x))
12732 mark_decl_referenced (SYMBOL_REF_DECL (x));
12735 if (MACHOPIC_INDIRECT
12736 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
12737 name = machopic_indirection_name (x, /*stub_p=*/true);
12739 assemble_name (file, name);
12741 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
12742 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
12743 fputs ("@PLT", file);
12750 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
12751 assemble_name (asm_out_file, buf);
12755 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12759 /* This used to output parentheses around the expression,
12760 but that does not work on the 386 (either ATT or BSD assembler). */
12761 output_pic_addr_const (file, XEXP (x, 0), code);
12765 if (GET_MODE (x) == VOIDmode)
12767 /* We can use %d if the number is <32 bits and positive. */
12768 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
12769 fprintf (file, "0x%lx%08lx",
12770 (unsigned long) CONST_DOUBLE_HIGH (x),
12771 (unsigned long) CONST_DOUBLE_LOW (x));
12773 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
12776 /* We can't handle floating point constants;
12777 TARGET_PRINT_OPERAND must handle them. */
12778 output_operand_lossage ("floating constant misused");
12782 /* Some assemblers need integer constants to appear first. */
12783 if (CONST_INT_P (XEXP (x, 0)))
12785 output_pic_addr_const (file, XEXP (x, 0), code);
12787 output_pic_addr_const (file, XEXP (x, 1), code);
12791 gcc_assert (CONST_INT_P (XEXP (x, 1)));
12792 output_pic_addr_const (file, XEXP (x, 1), code);
12794 output_pic_addr_const (file, XEXP (x, 0), code);
12800 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
12801 output_pic_addr_const (file, XEXP (x, 0), code);
12803 output_pic_addr_const (file, XEXP (x, 1), code);
12805 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
12809 if (XINT (x, 1) == UNSPEC_STACK_CHECK)
12811 bool f = i386_asm_output_addr_const_extra (file, x);
12816 gcc_assert (XVECLEN (x, 0) == 1);
12817 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
12818 switch (XINT (x, 1))
12821 fputs ("@GOT", file);
12823 case UNSPEC_GOTOFF:
12824 fputs ("@GOTOFF", file);
12826 case UNSPEC_PLTOFF:
12827 fputs ("@PLTOFF", file);
12830 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12831 "(%rip)" : "[rip]", file);
12833 case UNSPEC_GOTPCREL:
12834 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12835 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
12837 case UNSPEC_GOTTPOFF:
12838 /* FIXME: This might be @TPOFF in Sun ld too. */
12839 fputs ("@gottpoff", file);
12842 fputs ("@tpoff", file);
12844 case UNSPEC_NTPOFF:
12846 fputs ("@tpoff", file);
12848 fputs ("@ntpoff", file);
12850 case UNSPEC_DTPOFF:
12851 fputs ("@dtpoff", file);
12853 case UNSPEC_GOTNTPOFF:
12855 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12856 "@gottpoff(%rip)": "@gottpoff[rip]", file);
12858 fputs ("@gotntpoff", file);
12860 case UNSPEC_INDNTPOFF:
12861 fputs ("@indntpoff", file);
12864 case UNSPEC_MACHOPIC_OFFSET:
12866 machopic_output_function_base_name (file);
12870 output_operand_lossage ("invalid UNSPEC as operand");
12876 output_operand_lossage ("invalid expression as operand");
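/* For example, (const (unspec [foo] UNSPEC_GOTOFF)) comes out as
   "foo@GOTOFF", and a non-local function foo printed with code 'P'
   comes out as "foo@PLT" (illustrative; the UNSPEC cases above are
   the authoritative list). */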
12880 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
12881 We need to emit DTP-relative relocations. */
12883 static void ATTRIBUTE_UNUSED
12884 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
12886 fputs (ASM_LONG, file);
12887 output_addr_const (file, x);
12888 fputs ("@dtpoff", file);
12894 fputs (", 0", file);
12897 gcc_unreachable ();
12901 /* Return true if X is a representation of the PIC register. This copes
12902 with calls from ix86_find_base_term, where the register might have
12903 been replaced by a cselib value. */
12906 ix86_pic_register_p (rtx x)
12908 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
12909 return (pic_offset_table_rtx
12910 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
12912 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
12915 /* Helper function for ix86_delegitimize_address.
12916 Attempt to delegitimize TLS local-exec accesses. */
12919 ix86_delegitimize_tls_address (rtx orig_x)
12921 rtx x = orig_x, unspec;
12922 struct ix86_address addr;
12924 if (!TARGET_TLS_DIRECT_SEG_REFS)
12928 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
12930 if (ix86_decompose_address (x, &addr) == 0
12931 || addr.seg != (TARGET_64BIT ? SEG_FS : SEG_GS)
12932 || addr.disp == NULL_RTX
12933 || GET_CODE (addr.disp) != CONST)
12935 unspec = XEXP (addr.disp, 0);
12936 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
12937 unspec = XEXP (unspec, 0);
12938 if (GET_CODE (unspec) != UNSPEC || XINT (unspec, 1) != UNSPEC_NTPOFF)
12940 x = XVECEXP (unspec, 0, 0);
12941 gcc_assert (GET_CODE (x) == SYMBOL_REF);
12942 if (unspec != XEXP (addr.disp, 0))
12943 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.disp, 0), 1));
12946 rtx idx = addr.index;
12947 if (addr.scale != 1)
12948 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
12949 x = gen_rtx_PLUS (Pmode, idx, x);
12952 x = gen_rtx_PLUS (Pmode, addr.base, x);
12953 if (MEM_P (orig_x))
12954 x = replace_equiv_address_nv (orig_x, x);
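/* E.g. a local-exec access of the form %fs:foo@tpoff (possibly with
   base/index terms) is turned back into plain `foo' plus those terms --
   roughly the inverse of the TLS_MODEL_LOCAL_EXEC legitimization above,
   for the benefit of the debug-info machinery. (Illustrative sketch.) */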
12958 /* In the name of slightly smaller debug output, and to cater to
12959 general assembler lossage, recognize PIC+GOTOFF and turn it back
12960 into a direct symbol reference.
12962 On Darwin, this is necessary to avoid a crash, because Darwin
12963 has a different PIC label for each routine but the DWARF debugging
12964 information is not associated with any particular routine, so it's
12965 necessary to remove references to the PIC label from RTL stored by
12966 the DWARF output code. */
12969 ix86_delegitimize_address (rtx x)
12971 rtx orig_x = delegitimize_mem_from_attrs (x);
12972 /* addend is NULL or some rtx if x is something+GOTOFF where
12973 something doesn't include the PIC register. */
12974 rtx addend = NULL_RTX;
12975 /* reg_addend is NULL or a multiple of some register. */
12976 rtx reg_addend = NULL_RTX;
12977 /* const_addend is NULL or a const_int. */
12978 rtx const_addend = NULL_RTX;
12979 /* This is the result, or NULL. */
12980 rtx result = NULL_RTX;
12989 if (GET_CODE (x) != CONST
12990 || GET_CODE (XEXP (x, 0)) != UNSPEC
12991 || (XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
12992 && XINT (XEXP (x, 0), 1) != UNSPEC_PCREL)
12993 || !MEM_P (orig_x))
12994 return ix86_delegitimize_tls_address (orig_x);
12995 x = XVECEXP (XEXP (x, 0), 0, 0);
12996 if (GET_MODE (orig_x) != GET_MODE (x))
12998 x = simplify_gen_subreg (GET_MODE (orig_x), x,
13006 if (GET_CODE (x) != PLUS
13007 || GET_CODE (XEXP (x, 1)) != CONST)
13008 return ix86_delegitimize_tls_address (orig_x);
13010 if (ix86_pic_register_p (XEXP (x, 0)))
13011 /* %ebx + GOT/GOTOFF */
13013 else if (GET_CODE (XEXP (x, 0)) == PLUS)
13015 /* %ebx + %reg * scale + GOT/GOTOFF */
13016 reg_addend = XEXP (x, 0);
13017 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
13018 reg_addend = XEXP (reg_addend, 1);
13019 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
13020 reg_addend = XEXP (reg_addend, 0);
13023 reg_addend = NULL_RTX;
13024 addend = XEXP (x, 0);
13028 addend = XEXP (x, 0);
13030 x = XEXP (XEXP (x, 1), 0);
13031 if (GET_CODE (x) == PLUS
13032 && CONST_INT_P (XEXP (x, 1)))
13034 const_addend = XEXP (x, 1);
13038 if (GET_CODE (x) == UNSPEC
13039 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
13040 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
13041 result = XVECEXP (x, 0, 0);
13043 if (TARGET_MACHO && darwin_local_data_pic (x)
13044 && !MEM_P (orig_x))
13045 result = XVECEXP (x, 0, 0);
13048 return ix86_delegitimize_tls_address (orig_x);
13051 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
13053 result = gen_rtx_PLUS (Pmode, reg_addend, result);
13056 /* If the rest of original X doesn't involve the PIC register, add
13057 addend and subtract pic_offset_table_rtx. This can happen e.g.
13059 leal (%ebx, %ecx, 4), %ecx
13061 movl foo@GOTOFF(%ecx), %edx
13062 in which case we return (%ecx - %ebx) + foo. */
13063 if (pic_offset_table_rtx)
13064 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
13065 pic_offset_table_rtx),
13070 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
13072 result = simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
13073 if (result == NULL_RTX)
13079 /* If X is a machine specific address (i.e. a symbol or label being
13080 referenced as a displacement from the GOT implemented using an
13081 UNSPEC), then return the base term. Otherwise return X. */
13084 ix86_find_base_term (rtx x)
13090 if (GET_CODE (x) != CONST)
13092 term = XEXP (x, 0);
13093 if (GET_CODE (term) == PLUS
13094 && (CONST_INT_P (XEXP (term, 1))
13095 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
13096 term = XEXP (term, 0);
13097 if (GET_CODE (term) != UNSPEC
13098 || (XINT (term, 1) != UNSPEC_GOTPCREL
13099 && XINT (term, 1) != UNSPEC_PCREL))
13102 return XVECEXP (term, 0, 0);
13105 return ix86_delegitimize_address (x);
13109 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
13110 int fp, FILE *file)
13112 const char *suffix;
13114 if (mode == CCFPmode || mode == CCFPUmode)
13116 code = ix86_fp_compare_code_to_integer (code);
13120 code = reverse_condition (code);
13171 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
13175 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
13176 Those same assemblers have the same but opposite lossage on cmov. */
13177 if (mode == CCmode)
13178 suffix = fp ? "nbe" : "a";
13179 else if (mode == CCCmode)
13182 gcc_unreachable ();
13198 gcc_unreachable ();
13202 gcc_assert (mode == CCmode || mode == CCCmode);
13219 gcc_unreachable ();
13223 /* ??? As above. */
13224 gcc_assert (mode == CCmode || mode == CCCmode);
13225 suffix = fp ? "nb" : "ae";
13228 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
13232 /* ??? As above. */
13233 if (mode == CCmode)
13235 else if (mode == CCCmode)
13236 suffix = fp ? "nb" : "ae";
13238 gcc_unreachable ();
13241 suffix = fp ? "u" : "p";
13244 suffix = fp ? "nu" : "np";
13247 gcc_unreachable ();
13249 fputs (suffix, file);
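/* E.g. (EQ, CCmode) prints "e", (GTU, CCmode) prints "a", and with
   REVERSE set (EQ, CCmode) prints "ne"; the FP forms pick the fcmov
   spellings such as "nbe" instead of "a". (A sketch, not a full
   table -- the cases above are authoritative.) */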
13252 /* Print the name of register X to FILE based on its machine mode and number.
13253 If CODE is 'w', pretend the mode is HImode.
13254 If CODE is 'b', pretend the mode is QImode.
13255 If CODE is 'k', pretend the mode is SImode.
13256 If CODE is 'q', pretend the mode is DImode.
13257 If CODE is 'x', pretend the mode is V4SFmode.
13258 If CODE is 't', pretend the mode is V8SFmode.
13259 If CODE is 'h', pretend the reg is the 'high' byte register.
13260 If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.
13261 If CODE is 'd', duplicate the operand for an AVX instruction.
13265 print_reg (rtx x, int code, FILE *file)
13268 bool duplicated = code == 'd' && TARGET_AVX;
13270 gcc_assert (x == pc_rtx
13271 || (REGNO (x) != ARG_POINTER_REGNUM
13272 && REGNO (x) != FRAME_POINTER_REGNUM
13273 && REGNO (x) != FLAGS_REG
13274 && REGNO (x) != FPSR_REG
13275 && REGNO (x) != FPCR_REG));
13277 if (ASSEMBLER_DIALECT == ASM_ATT)
13282 gcc_assert (TARGET_64BIT);
13283 fputs ("rip", file);
13287 if (code == 'w' || MMX_REG_P (x))
13289 else if (code == 'b')
13291 else if (code == 'k')
13293 else if (code == 'q')
13295 else if (code == 'y')
13297 else if (code == 'h')
13299 else if (code == 'x')
13301 else if (code == 't')
13304 code = GET_MODE_SIZE (GET_MODE (x));
13306 /* Irritatingly, AMD extended registers use a different naming convention
13307 from the normal registers. */
13308 if (REX_INT_REG_P (x))
13310 gcc_assert (TARGET_64BIT);
13314 error ("extended registers have no high halves");
13317 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
13320 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
13323 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
13326 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
13329 error ("unsupported operand size for extended register");
13339 if (STACK_TOP_P (x))
13348 if (! ANY_FP_REG_P (x))
13349 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
13354 reg = hi_reg_name[REGNO (x)];
13357 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
13359 reg = qi_reg_name[REGNO (x)];
13362 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
13364 reg = qi_high_reg_name[REGNO (x)];
13369 gcc_assert (!duplicated);
13371 fputs (hi_reg_name[REGNO (x)] + 1, file);
13376 gcc_unreachable ();
13382 if (ASSEMBLER_DIALECT == ASM_ATT)
13383 fprintf (file, ", %%%s", reg);
13385 fprintf (file, ", %s", reg);
13389 /* Locate some local-dynamic symbol still in use by this function
13390 so that we can print its name in some tls_local_dynamic_base
13394 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
13398 if (GET_CODE (x) == SYMBOL_REF
13399 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
13401 cfun->machine->some_ld_name = XSTR (x, 0);
13408 static const char *
13409 get_some_local_dynamic_name (void)
13413 if (cfun->machine->some_ld_name)
13414 return cfun->machine->some_ld_name;
13416 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
13417 if (NONDEBUG_INSN_P (insn)
13418 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
13419 return cfun->machine->some_ld_name;
13424 /* Meaning of CODE:
13425 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
13426 C -- print opcode suffix for set/cmov insn.
13427 c -- like C, but print reversed condition
13428 F,f -- likewise, but for floating-point.
13429 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
13431 R -- print the prefix for register names.
13432 z -- print the opcode suffix for the size of the current operand.
13433 Z -- likewise, with special suffixes for x87 instructions.
13434 * -- print a star (in certain assembler syntax)
13435 A -- print an absolute memory reference.
13436 w -- print the operand as if it's a "word" (HImode) even if it isn't.
13437 s -- print a shift double count, followed by the assembler's argument
13439 b -- print the QImode name of the register for the indicated operand.
13440 %b0 would print %al if operands[0] is reg 0.
13441 w -- likewise, print the HImode name of the register.
13442 k -- likewise, print the SImode name of the register.
13443 q -- likewise, print the DImode name of the register.
13444 x -- likewise, print the V4SFmode name of the register.
13445 t -- likewise, print the V8SFmode name of the register.
13446 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
13447 y -- print "st(0)" instead of "st" as a register.
13448 d -- print duplicated register operand for AVX instruction.
13449 D -- print condition for SSE cmp instruction.
13450 P -- if PIC, print an @PLT suffix.
13451 p -- print raw symbol name.
13452 X -- don't print any sort of PIC '@' suffix for a symbol.
13453 & -- print some in-use local-dynamic symbol name.
13454 H -- print a memory address offset by 8; used for sse high-parts
13455 Y -- print condition for XOP pcom* instruction.
13456 + -- print a branch hint as 'cs' or 'ds' prefix
13457 ; -- print a semicolon (after prefixes, due to a bug in older gas).
13458 @ -- print the segment register of a thread base pointer load
13462 ix86_print_operand (FILE *file, rtx x, int code)
13469 if (ASSEMBLER_DIALECT == ASM_ATT)
13475 const char *name = get_some_local_dynamic_name ();
13477 output_operand_lossage ("'%%&' used without any "
13478 "local dynamic TLS references");
13480 assemble_name (file, name);
13485 switch (ASSEMBLER_DIALECT)
13492 /* Intel syntax. For absolute addresses, registers should not
13493 be surrounded by braces. */
13497 ix86_print_operand (file, x, 0);
13504 gcc_unreachable ();
13507 ix86_print_operand (file, x, 0);
13512 if (ASSEMBLER_DIALECT == ASM_ATT)
13517 if (ASSEMBLER_DIALECT == ASM_ATT)
13522 if (ASSEMBLER_DIALECT == ASM_ATT)
13527 if (ASSEMBLER_DIALECT == ASM_ATT)
13532 if (ASSEMBLER_DIALECT == ASM_ATT)
13537 if (ASSEMBLER_DIALECT == ASM_ATT)
13542 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
13544 /* Opcodes don't get size suffixes if using Intel opcodes. */
13545 if (ASSEMBLER_DIALECT == ASM_INTEL)
13548 switch (GET_MODE_SIZE (GET_MODE (x)))
13567 output_operand_lossage
13568 ("invalid operand size for operand code '%c'", code);
13573 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
13575 (0, "non-integer operand used with operand code '%c'", code);
13579 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
13580 if (ASSEMBLER_DIALECT == ASM_INTEL)
13583 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
13585 switch (GET_MODE_SIZE (GET_MODE (x)))
13588 #ifdef HAVE_AS_IX86_FILDS
13598 #ifdef HAVE_AS_IX86_FILDQ
13601 fputs ("ll", file);
13609 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
13611 /* 387 opcodes don't get size suffixes
13612 if the operands are registers. */
13613 if (STACK_REG_P (x))
13616 switch (GET_MODE_SIZE (GET_MODE (x)))
13637 output_operand_lossage
13638 ("invalid operand type used with operand code '%c'", code);
13642 output_operand_lossage
13643 ("invalid operand size for operand code '%c'", code);
13661 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
13663 ix86_print_operand (file, x, 0);
13664 fputs (", ", file);
13669 /* Little bit of braindamage here. The SSE compare instructions
13670 use completely different names for the comparisons than the
13671 fp conditional moves do. */
13674 switch (GET_CODE (x))
13677 fputs ("eq", file);
13680 fputs ("eq_us", file);
13683 fputs ("lt", file);
13686 fputs ("nge", file);
13689 fputs ("le", file);
13692 fputs ("ngt", file);
13695 fputs ("unord", file);
13698 fputs ("neq", file);
13701 fputs ("neq_oq", file);
13704 fputs ("ge", file);
13707 fputs ("nlt", file);
13710 fputs ("gt", file);
13713 fputs ("nle", file);
13716 fputs ("ord", file);
13719 output_operand_lossage ("operand is not a condition code, "
13720 "invalid operand code 'D'");
13726 switch (GET_CODE (x))
13730 fputs ("eq", file);
13734 fputs ("lt", file);
13738 fputs ("le", file);
13741 fputs ("unord", file);
13745 fputs ("neq", file);
13749 fputs ("nlt", file);
13753 fputs ("nle", file);
13756 fputs ("ord", file);
13759 output_operand_lossage ("operand is not a condition code, "
13760 "invalid operand code 'D'");
13766 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
13767 if (ASSEMBLER_DIALECT == ASM_ATT)
13769 switch (GET_MODE (x))
13771 case HImode: putc ('w', file); break;
13773 case SFmode: putc ('l', file); break;
13775 case DFmode: putc ('q', file); break;
13776 default: gcc_unreachable ();
13783 if (!COMPARISON_P (x))
13785 output_operand_lossage ("operand is neither a constant nor a "
13786 "condition code, invalid operand code "
13790 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
13793 if (!COMPARISON_P (x))
13795 output_operand_lossage ("operand is neither a constant nor a "
13796 "condition code, invalid operand code "
13800 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
13801 if (ASSEMBLER_DIALECT == ASM_ATT)
13804 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
13807 /* Like above, but reverse condition */
13809 /* Check to see if argument to %c is really a constant
13810 and not a condition code which needs to be reversed. */
13811 if (!COMPARISON_P (x))
13813 output_operand_lossage ("operand is neither a constant nor a "
13814 "condition code, invalid operand "
13818 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
13821 if (!COMPARISON_P (x))
13823 output_operand_lossage ("operand is neither a constant nor a "
13824 "condition code, invalid operand "
13828 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
13829 if (ASSEMBLER_DIALECT == ASM_ATT)
13832 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
13836 /* It doesn't actually matter what mode we use here, as we're
13837 only going to use this for printing. */
13838 x = adjust_address_nv (x, DImode, 8);
13846 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
13849 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
13852 int pred_val = INTVAL (XEXP (x, 0));
13854 if (pred_val < REG_BR_PROB_BASE * 45 / 100
13855 || pred_val > REG_BR_PROB_BASE * 55 / 100)
13857 int taken = pred_val > REG_BR_PROB_BASE / 2;
13858 int cputaken = final_forward_branch_p (current_output_insn) == 0;
13860 /* Emit hints only in cases where the default branch-prediction
13861 heuristics would fail. */
13862 if (taken != cputaken)
13864 /* We use the 3e (DS) prefix for taken branches and
13865 the 2e (CS) prefix for not-taken branches. */
13867 fputs ("ds ; ", file);
13869 fputs ("cs ; ", file);
13877 switch (GET_CODE (x))
13880 fputs ("neq", file);
13883 fputs ("eq", file);
13887 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
13891 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
13895 fputs ("le", file);
13899 fputs ("lt", file);
13902 fputs ("unord", file);
13905 fputs ("ord", file);
13908 fputs ("ueq", file);
13911 fputs ("nlt", file);
13914 fputs ("nle", file);
13917 fputs ("ule", file);
13920 fputs ("ult", file);
13923 fputs ("une", file);
13926 output_operand_lossage ("operand is not a condition code, "
13927 "invalid operand code 'Y'");
13933 #ifndef HAVE_AS_IX86_REP_LOCK_PREFIX
13939 if (ASSEMBLER_DIALECT == ASM_ATT)
13942 /* The kernel uses a different segment register for performance
13943 reasons; this way a system call does not have to trash the
13944 userspace segment register, which would be expensive. */
13945 if (TARGET_64BIT && ix86_cmodel != CM_KERNEL)
13946 fputs ("fs", file);
13948 fputs ("gs", file);
13952 output_operand_lossage ("invalid operand code '%c'", code);
13957 print_reg (x, code, file);
13959 else if (MEM_P (x))
13961 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
13962 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
13963 && GET_MODE (x) != BLKmode)
13966 switch (GET_MODE_SIZE (GET_MODE (x)))
13968 case 1: size = "BYTE"; break;
13969 case 2: size = "WORD"; break;
13970 case 4: size = "DWORD"; break;
13971 case 8: size = "QWORD"; break;
13972 case 12: size = "TBYTE"; break;
13974 if (GET_MODE (x) == XFmode)
13979 case 32: size = "YMMWORD"; break;
13981 gcc_unreachable ();
13984 /* Check for explicit size override (codes 'b', 'w' and 'k') */
13987 else if (code == 'w')
13989 else if (code == 'k')
13992 fputs (size, file);
13993 fputs (" PTR ", file);
13997 /* Avoid (%rip) for call operands. */
13998 if (CONSTANT_ADDRESS_P (x) && code == 'P'
13999 && !CONST_INT_P (x))
14000 output_addr_const (file, x);
14001 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
14002 output_operand_lossage ("invalid constraints for operand");
14004 output_address (x);
14007 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
14012 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14013 REAL_VALUE_TO_TARGET_SINGLE (r, l);
14015 if (ASSEMBLER_DIALECT == ASM_ATT)
14017 /* Sign extend 32bit SFmode immediate to 8 bytes. */
14019 fprintf (file, "0x%08llx", (unsigned long long) (int) l);
14021 fprintf (file, "0x%08x", (unsigned int) l);
14024 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
14029 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
14030 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
14032 if (ASSEMBLER_DIALECT == ASM_ATT)
14034 fprintf (file, "0x%lx%08lx", l[1] & 0xffffffff, l[0] & 0xffffffff);
14037 /* These float cases don't actually occur as immediate operands. */
14038 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
14042 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
14043 fputs (dstr, file);
14048 /* We have patterns that allow zero sets of memory, for instance.
14049 In 64-bit mode, we should probably support all 8-byte vectors,
14050 since we can in fact encode that into an immediate. */
14051 if (GET_CODE (x) == CONST_VECTOR)
14053 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
14057 if (code != 'P' && code != 'p')
14059 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
14061 if (ASSEMBLER_DIALECT == ASM_ATT)
14064 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
14065 || GET_CODE (x) == LABEL_REF)
14067 if (ASSEMBLER_DIALECT == ASM_ATT)
14070 fputs ("OFFSET FLAT:", file);
14073 if (CONST_INT_P (x))
14074 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
14075 else if (flag_pic || MACHOPIC_INDIRECT)
14076 output_pic_addr_const (file, x, code);
14078 output_addr_const (file, x);
14083 ix86_print_operand_punct_valid_p (unsigned char code)
14085 return (code == '@' || code == '*' || code == '+'
14086 || code == '&' || code == ';');
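/* Example (hypothetical template): in "and{b}\t{%1, %b0|%b0, %1}",
   "%b0" prints the QImode name of operands[0] -- "%al" in AT&T syntax,
   "al" in Intel syntax -- when operands[0] is the ax register. */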
14089 /* Print a memory operand whose address is ADDR. */
14092 ix86_print_operand_address (FILE *file, rtx addr)
14094 struct ix86_address parts;
14095 rtx base, index, disp;
14097 int ok = ix86_decompose_address (addr, &parts);
14101 if (parts.base && GET_CODE (parts.base) == SUBREG)
14103 rtx tmp = SUBREG_REG (parts.base);
14104 parts.base = simplify_subreg (GET_MODE (parts.base),
14105 tmp, GET_MODE (tmp), 0);
14108 if (parts.index && GET_CODE (parts.index) == SUBREG)
14110 rtx tmp = SUBREG_REG (parts.index);
14111 parts.index = simplify_subreg (GET_MODE (parts.index),
14112 tmp, GET_MODE (tmp), 0);
14116 index = parts.index;
14118 scale = parts.scale;
14126 if (ASSEMBLER_DIALECT == ASM_ATT)
14128 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
14131 gcc_unreachable ();
14134 /* Use the one-byte-shorter RIP-relative addressing in 64-bit mode. */
14135 if (TARGET_64BIT && !base && !index)
14139 if (GET_CODE (disp) == CONST
14140 && GET_CODE (XEXP (disp, 0)) == PLUS
14141 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14142 symbol = XEXP (XEXP (disp, 0), 0);
14144 if (GET_CODE (symbol) == LABEL_REF
14145 || (GET_CODE (symbol) == SYMBOL_REF
14146 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
14149 if (!base && !index)
14151 /* Displacement only requires special attention. */
14153 if (CONST_INT_P (disp))
14155 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
14156 fputs ("ds:", file);
14157 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
14160 output_pic_addr_const (file, disp, 0);
14162 output_addr_const (file, disp);
14166 /* Print DImode registers on 64bit targets to avoid addr32 prefixes. */
14167 int code = TARGET_64BIT ? 'q' : 0;
14169 if (ASSEMBLER_DIALECT == ASM_ATT)
14174 output_pic_addr_const (file, disp, 0);
14175 else if (GET_CODE (disp) == LABEL_REF)
14176 output_asm_label (disp);
14178 output_addr_const (file, disp);
14183 print_reg (base, code, file);
14187 print_reg (index, code, file);
14189 fprintf (file, ",%d", scale);
14195 rtx offset = NULL_RTX;
14199 /* Pull out the offset of a symbol; print any symbol itself. */
14200 if (GET_CODE (disp) == CONST
14201 && GET_CODE (XEXP (disp, 0)) == PLUS
14202 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
14204 offset = XEXP (XEXP (disp, 0), 1);
14205 disp = gen_rtx_CONST (VOIDmode,
14206 XEXP (XEXP (disp, 0), 0));
14210 output_pic_addr_const (file, disp, 0);
14211 else if (GET_CODE (disp) == LABEL_REF)
14212 output_asm_label (disp);
14213 else if (CONST_INT_P (disp))
14216 output_addr_const (file, disp);
14222 print_reg (base, code, file);
14225 if (INTVAL (offset) >= 0)
14227 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14231 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
14238 print_reg (index, code, file);
14240 fprintf (file, "*%d", scale);
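/* The same address comes out differently per dialect, e.g. (roughly)
   AT&T: -4(%ebp,%ecx,4) vs. Intel: DWORD PTR [ebp+ecx*4-4]; the two
   branches above produce the respective spellings. */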
14247 /* Implementation of TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
14250 i386_asm_output_addr_const_extra (FILE *file, rtx x)
14254 if (GET_CODE (x) != UNSPEC)
14257 op = XVECEXP (x, 0, 0);
14258 switch (XINT (x, 1))
14260 case UNSPEC_GOTTPOFF:
14261 output_addr_const (file, op);
14262 /* FIXME: This might be @TPOFF in Sun ld. */
14263 fputs ("@gottpoff", file);
14266 output_addr_const (file, op);
14267 fputs ("@tpoff", file);
14269 case UNSPEC_NTPOFF:
14270 output_addr_const (file, op);
14272 fputs ("@tpoff", file);
14274 fputs ("@ntpoff", file);
14276 case UNSPEC_DTPOFF:
14277 output_addr_const (file, op);
14278 fputs ("@dtpoff", file);
14280 case UNSPEC_GOTNTPOFF:
14281 output_addr_const (file, op);
14283 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
14284 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
14286 fputs ("@gotntpoff", file);
14288 case UNSPEC_INDNTPOFF:
14289 output_addr_const (file, op);
14290 fputs ("@indntpoff", file);
14293 case UNSPEC_MACHOPIC_OFFSET:
14294 output_addr_const (file, op);
14296 machopic_output_function_base_name (file);
14300 case UNSPEC_STACK_CHECK:
14304 gcc_assert (flag_split_stack);
14306 #ifdef TARGET_THREAD_SPLIT_STACK_OFFSET
14307 offset = TARGET_THREAD_SPLIT_STACK_OFFSET;
14309 gcc_unreachable ();
14312 fprintf (file, "%s:%d", TARGET_64BIT ? "%fs" : "%gs", offset);
14323 /* Split one or more double-mode RTL references into pairs of half-mode
14324 references. The RTL can be REG, offsettable MEM, integer constant, or
14325 CONST_DOUBLE. "operands" is a pointer to an array of double-mode RTLs to
14326 split and "num" is its length. lo_half and hi_half are output arrays
14327 that parallel "operands". */
14330 split_double_mode (enum machine_mode mode, rtx operands[],
14331 int num, rtx lo_half[], rtx hi_half[])
14333 enum machine_mode half_mode;
14339 half_mode = DImode;
14342 half_mode = SImode;
14345 gcc_unreachable ();
14348 byte = GET_MODE_SIZE (half_mode);
14352 rtx op = operands[num];
14354 /* simplify_subreg refuses to split volatile memory addresses,
14355 but we still have to handle them. */
14358 lo_half[num] = adjust_address (op, half_mode, 0);
14359 hi_half[num] = adjust_address (op, half_mode, byte);
14363 lo_half[num] = simplify_gen_subreg (half_mode, op,
14364 GET_MODE (op) == VOIDmode
14365 ? mode : GET_MODE (op), 0);
14366 hi_half[num] = simplify_gen_subreg (half_mode, op,
14367 GET_MODE (op) == VOIDmode
14368 ? mode : GET_MODE (op), byte);
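/* E.g. splitting a DImode pseudo on a 32-bit target yields
   (subreg:SI (reg:DI N) 0) and (subreg:SI (reg:DI N) 4), while an
   offsettable MEM yields the MEM itself and the MEM at +4 (a sketch
   of the two paths above). */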
14373 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
14374 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
14375 is the expression of the binary operation. The output may either be
14376 emitted here, or returned to the caller, like all output_* functions.
14378 There is no guarantee that the operands are the same mode, as they
14379 might be within FLOAT or FLOAT_EXTEND expressions. */
14381 #ifndef SYSV386_COMPAT
14382 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
14383 wants to fix the assemblers because that causes incompatibility
14384 with gcc. No-one wants to fix gcc because that causes
14385 incompatibility with assemblers... You can use the option of
14386 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
14387 #define SYSV386_COMPAT 1
14391 output_387_binary_op (rtx insn, rtx *operands)
14393 static char buf[40];
14396 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
14398 #ifdef ENABLE_CHECKING
14399 /* Even if we do not want to check the inputs, this documents the input
14400 constraints, which helps in understanding the following code. */
14401 if (STACK_REG_P (operands[0])
14402 && ((REG_P (operands[1])
14403 && REGNO (operands[0]) == REGNO (operands[1])
14404 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
14405 || (REG_P (operands[2])
14406 && REGNO (operands[0]) == REGNO (operands[2])
14407 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
14408 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
14411 gcc_assert (is_sse);
14414 switch (GET_CODE (operands[3]))
14417 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14418 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14426 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14427 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14435 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14436 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14444 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
14445 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
14453 gcc_unreachable ();
14460 strcpy (buf, ssep);
14461 if (GET_MODE (operands[0]) == SFmode)
14462 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
14464 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
14468 strcpy (buf, ssep + 1);
14469 if (GET_MODE (operands[0]) == SFmode)
14470 strcat (buf, "ss\t{%2, %0|%0, %2}");
14472 strcat (buf, "sd\t{%2, %0|%0, %2}");
14478 switch (GET_CODE (operands[3]))
14482 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
14484 rtx temp = operands[2];
14485 operands[2] = operands[1];
14486 operands[1] = temp;
14489 /* Now we know operands[0] == operands[1]. */
14491 if (MEM_P (operands[2]))
14497 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14499 if (STACK_TOP_P (operands[0]))
14500 /* How is it that we are storing to a dead operand[2]?
14501 Well, presumably operands[1] is dead too. We can't
14502 store the result to st(0) as st(0) gets popped on this
14503 instruction. Instead store to operands[2] (which I
14504 think has to be st(1)). st(1) will be popped later.
14505 gcc <= 2.8.1 didn't have this check and generated
14506 assembly code that the Unixware assembler rejected. */
14507 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14509 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14513 if (STACK_TOP_P (operands[0]))
14514 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14516 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
14521 if (MEM_P (operands[1]))
14527 if (MEM_P (operands[2]))
14533 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
14536 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
14537 derived assemblers, confusingly reverse the direction of
14538 the operation for fsub{r} and fdiv{r} when the
14539 destination register is not st(0). The Intel assembler
14540 doesn't have this brain damage. Read !SYSV386_COMPAT to
14541 figure out what the hardware really does. */
14542 if (STACK_TOP_P (operands[0]))
14543 p = "{p\t%0, %2|rp\t%2, %0}";
14545 p = "{rp\t%2, %0|p\t%0, %2}";
14547 if (STACK_TOP_P (operands[0]))
14548 /* As above for fmul/fadd, we can't store to st(0). */
14549 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
14551 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
14556 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
14559 if (STACK_TOP_P (operands[0]))
14560 p = "{rp\t%0, %1|p\t%1, %0}";
14562 p = "{p\t%1, %0|rp\t%0, %1}";
14564 if (STACK_TOP_P (operands[0]))
14565 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
14567 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
14572 if (STACK_TOP_P (operands[0]))
14574 if (STACK_TOP_P (operands[1]))
14575 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
14577 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
14580 else if (STACK_TOP_P (operands[1]))
14583 p = "{\t%1, %0|r\t%0, %1}";
14585 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
14591 p = "{r\t%2, %0|\t%0, %2}";
14593 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
14599 gcc_unreachable ();
14606 /* Return the needed mode for the entity in the optimize_mode_switching pass. */
14609 ix86_mode_needed (int entity, rtx insn)
14611 enum attr_i387_cw mode;
14613 /* The mode UNINITIALIZED is used to store the control word after a
14614 function call or ASM pattern. The mode ANY specifies that the function
14615 has no requirements on the control word and makes no changes in the
14616 bits we are interested in. */
14619 || (NONJUMP_INSN_P (insn)
14620 && (asm_noperands (PATTERN (insn)) >= 0
14621 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
14622 return I387_CW_UNINITIALIZED;
14624 if (recog_memoized (insn) < 0)
14625 return I387_CW_ANY;
14627 mode = get_attr_i387_cw (insn);
14632 if (mode == I387_CW_TRUNC)
14637 if (mode == I387_CW_FLOOR)
14642 if (mode == I387_CW_CEIL)
14647 if (mode == I387_CW_MASK_PM)
14652 gcc_unreachable ();
14655 return I387_CW_ANY;
14658 /* Output code to initialize control word copies used by trunc?f?i and
14659 rounding patterns. CURRENT_MODE is set to the current control word,
14660 while NEW_MODE is set to the new control word. */
14663 emit_i387_cw_initialization (int mode)
14665 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
14668 enum ix86_stack_slot slot;
14670 rtx reg = gen_reg_rtx (HImode);
14672 emit_insn (gen_x86_fnstcw_1 (stored_mode));
14673 emit_move_insn (reg, copy_rtx (stored_mode));
14675 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
14676 || optimize_function_for_size_p (cfun))
14680 case I387_CW_TRUNC:
14681 /* round toward zero (truncate) */
14682 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
14683 slot = SLOT_CW_TRUNC;
14686 case I387_CW_FLOOR:
14687 /* round down toward -oo */
14688 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
14689 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
14690 slot = SLOT_CW_FLOOR;
14694 /* round up toward +oo */
14695 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
14696 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
14697 slot = SLOT_CW_CEIL;
14700 case I387_CW_MASK_PM:
14701 /* mask precision exception for nearbyint() */
14702 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
14703 slot = SLOT_CW_MASK_PM;
14707 gcc_unreachable ();
14714 case I387_CW_TRUNC:
14715 /* round toward zero (truncate) */
14716 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
14717 slot = SLOT_CW_TRUNC;
14720 case I387_CW_FLOOR:
14721 /* round down toward -oo */
14722 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
14723 slot = SLOT_CW_FLOOR;
14727 /* round up toward +oo */
14728 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
14729 slot = SLOT_CW_CEIL;
14732 case I387_CW_MASK_PM:
14733 /* mask precision exception for nearbyint() */
14734 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
14735 slot = SLOT_CW_MASK_PM;
14739 gcc_unreachable ();
14743 gcc_assert (slot < MAX_386_STACK_LOCALS);
14745 new_mode = assign_386_stack_local (HImode, slot);
14746 emit_move_insn (new_mode, reg);
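/* The magic numbers above are x87 control word fields: bits 10-11 are
   the rounding control (00 nearest, 01 down, 10 up, 11 truncate, hence
   the 0x0400/0x0800/0x0c00 masks) and bit 5 (0x0020) is the precision
   exception mask. */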
14749 /* Output code for INSN to convert a float to a signed int. OPERANDS
14750 are the insn operands. The output may be [HSD]Imode and the input
14751 operand may be [SDX]Fmode. */
14754 output_fix_trunc (rtx insn, rtx *operands, bool fisttp)
14756 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
14757 int dimode_p = GET_MODE (operands[0]) == DImode;
14758 int round_mode = get_attr_i387_cw (insn);
14760 /* Jump through a hoop or two for DImode, since the hardware has no
14761 non-popping instruction. We used to do this a different way, but
14762 that was somewhat fragile and broke with post-reload splitters. */
14763 if ((dimode_p || fisttp) && !stack_top_dies)
14764 output_asm_insn ("fld\t%y1", operands);
14766 gcc_assert (STACK_TOP_P (operands[1]));
14767 gcc_assert (MEM_P (operands[0]));
14768 gcc_assert (GET_MODE (operands[1]) != TFmode);
14771 output_asm_insn ("fisttp%Z0\t%0", operands);
14774 if (round_mode != I387_CW_ANY)
14775 output_asm_insn ("fldcw\t%3", operands);
14776 if (stack_top_dies || dimode_p)
14777 output_asm_insn ("fistp%Z0\t%0", operands);
14779 output_asm_insn ("fist%Z0\t%0", operands);
14780 if (round_mode != I387_CW_ANY)
14781 output_asm_insn ("fldcw\t%2", operands);
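/* So for a typical DImode truncation the emitted sequence is roughly:
     fldcw  %3        ; switch to round-toward-zero
     fistp  %0        ; convert and pop
     fldcw  %2        ; restore the caller's control word  */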
14787 /* Output code for x87 ffreep insn. The OPNO argument, which may only
14788 have the values zero or one, indicates the ffreep insn's operand
14789 from the OPERANDS array. */
14791 static const char *
14792 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
14794 if (TARGET_USE_FFREEP)
14795 #ifdef HAVE_AS_IX86_FFREEP
14796 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
14799 static char retval[32];
14800 int regno = REGNO (operands[opno]);
14802 gcc_assert (FP_REGNO_P (regno));
14804 regno -= FIRST_STACK_REG;
14806 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
14811 return opno ? "fstp\t%y1" : "fstp\t%y0";
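/* The raw encoding above works because "ffreep %st(N)" assembles to the
   two bytes DF C0+N; emitting them via ASM_SHORT as the little-endian
   value 0xc<N>df sidesteps assemblers that lack the mnemonic. */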
14815 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
14816 should be used. UNORDERED_P is true when fucom should be used. */
14819 output_fp_compare (rtx insn, rtx *operands, bool eflags_p, bool unordered_p)
14821 int stack_top_dies;
14822 rtx cmp_op0, cmp_op1;
14823 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
14827 cmp_op0 = operands[0];
14828 cmp_op1 = operands[1];
14832 cmp_op0 = operands[1];
14833 cmp_op1 = operands[2];
14838 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
14839 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
14840 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
14841 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
14843 if (GET_MODE (operands[0]) == SFmode)
14845 return &ucomiss[TARGET_AVX ? 0 : 1];
14847 return &comiss[TARGET_AVX ? 0 : 1];
14850 return &ucomisd[TARGET_AVX ? 0 : 1];
14852 return &comisd[TARGET_AVX ? 0 : 1];
14855 gcc_assert (STACK_TOP_P (cmp_op0));
14857 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
14859 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
14861 if (stack_top_dies)
14863 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
14864 return output_387_ffreep (operands, 1);
14867 return "ftst\n\tfnstsw\t%0";
14870 if (STACK_REG_P (cmp_op1)
14872 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
14873 && REGNO (cmp_op1) != FIRST_STACK_REG)
14875 /* If the top of the 387 stack dies, and the other operand is
14876 also a stack register that dies, then this must be an
14877 `fcompp' float compare. */
14881 /* There is no double popping fcomi variant. Fortunately,
14882 eflags is immune from the fstp's cc clobbering. */
14884 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
14886 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
14887 return output_387_ffreep (operands, 0);
14892 return "fucompp\n\tfnstsw\t%0";
14894 return "fcompp\n\tfnstsw\t%0";
14899 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
14901 static const char * const alt[16] =
14903 "fcom%Z2\t%y2\n\tfnstsw\t%0",
14904 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
14905 "fucom%Z2\t%y2\n\tfnstsw\t%0",
14906 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
14908 "ficom%Z2\t%y2\n\tfnstsw\t%0",
14909 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
14913 "fcomi\t{%y1, %0|%0, %y1}",
14914 "fcomip\t{%y1, %0|%0, %y1}",
14915 "fucomi\t{%y1, %0|%0, %y1}",
14916 "fucomip\t{%y1, %0|%0, %y1}",
14927 mask = eflags_p << 3;
14928 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
14929 mask |= unordered_p << 1;
14930 mask |= stack_top_dies;
14932 gcc_assert (mask < 16);
14941 ix86_output_addr_vec_elt (FILE *file, int value)
14943 const char *directive = ASM_LONG;
14947 directive = ASM_QUAD;
14949 gcc_assert (!TARGET_64BIT);
14952 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
14956 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
14958 const char *directive = ASM_LONG;
14961 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
14962 directive = ASM_QUAD;
14964 gcc_assert (!TARGET_64BIT);
14966 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
14967 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
14968 fprintf (file, "%s%s%d-%s%d\n",
14969 directive, LPREFIX, value, LPREFIX, rel);
14970 else if (HAVE_AS_GOTOFF_IN_DATA)
14971 fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
14973 else if (TARGET_MACHO)
14975 fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
14976 machopic_output_function_base_name (file);
14981 asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
14982 GOT_SYMBOL_NAME, LPREFIX, value);
14985 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
14989 ix86_expand_clear (rtx dest)
14993 /* We play register width games, which are only valid after reload. */
14994 gcc_assert (reload_completed);
14996 /* Avoid HImode and its attendant prefix byte. */
14997 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
14998 dest = gen_rtx_REG (SImode, REGNO (dest));
14999 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
15001 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
15002 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
15004 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15005 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
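/* I.e. clearing %eax prefers the 2-byte "xorl %eax, %eax" over the
   5-byte "movl $0, %eax"; since XOR clobbers the flags, the insn is
   wrapped in a PARALLEL with an explicit FLAGS_REG clobber. */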
15011 /* X is an unchanging MEM. If it is a constant pool reference, return
15012 the constant pool rtx, else NULL. */
15015 maybe_get_pool_constant (rtx x)
15017 x = ix86_delegitimize_address (XEXP (x, 0));
15019 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
15020 return get_pool_constant (x);
15026 ix86_expand_move (enum machine_mode mode, rtx operands[])
15029 enum tls_model model;
15034 if (GET_CODE (op1) == SYMBOL_REF)
15036 model = SYMBOL_REF_TLS_MODEL (op1);
15039 op1 = legitimize_tls_address (op1, model, true);
15040 op1 = force_operand (op1, op0);
15043 if (GET_MODE (op1) != mode)
15044 op1 = convert_to_mode (mode, op1, 1);
15046 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15047 && SYMBOL_REF_DLLIMPORT_P (op1))
15048 op1 = legitimize_dllimport_symbol (op1, false);
15050 else if (GET_CODE (op1) == CONST
15051 && GET_CODE (XEXP (op1, 0)) == PLUS
15052 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
15054 rtx addend = XEXP (XEXP (op1, 0), 1);
15055 rtx symbol = XEXP (XEXP (op1, 0), 0);
15058 model = SYMBOL_REF_TLS_MODEL (symbol);
15060 tmp = legitimize_tls_address (symbol, model, true);
15061 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
15062 && SYMBOL_REF_DLLIMPORT_P (symbol))
15063 tmp = legitimize_dllimport_symbol (symbol, true);
15067 tmp = force_operand (tmp, NULL);
15068 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
15069 op0, 1, OPTAB_DIRECT);
15072 if (GET_MODE (tmp) != mode)
15073 op1 = convert_to_mode (mode, tmp, 1);
15077 if ((flag_pic || MACHOPIC_INDIRECT)
15078 && symbolic_operand (op1, mode))
15080 if (TARGET_MACHO && !TARGET_64BIT)
15083 /* dynamic-no-pic */
15084 if (MACHOPIC_INDIRECT)
15086 rtx temp = ((reload_in_progress
15087 || ((op0 && REG_P (op0))
15089 ? op0 : gen_reg_rtx (Pmode));
15090 op1 = machopic_indirect_data_reference (op1, temp);
15092 op1 = machopic_legitimize_pic_address (op1, mode,
15093 temp == op1 ? 0 : temp);
15095 if (op0 != op1 && GET_CODE (op0) != MEM)
15097 rtx insn = gen_rtx_SET (VOIDmode, op0, op1);
15101 if (GET_CODE (op0) == MEM)
15102 op1 = force_reg (Pmode, op1);
15106 if (GET_CODE (temp) != REG)
15107 temp = gen_reg_rtx (Pmode);
15108 temp = legitimize_pic_address (op1, temp);
15113 /* dynamic-no-pic */
15119 op1 = force_reg (mode, op1);
15120 else if (!(TARGET_64BIT && x86_64_movabs_operand (op1, DImode)))
15122 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
15123 op1 = legitimize_pic_address (op1, reg);
15126 if (GET_MODE (op1) != mode)
15127 op1 = convert_to_mode (mode, op1, 1);
15134 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
15135 || !push_operand (op0, mode))
15137 op1 = force_reg (mode, op1);
15139 if (push_operand (op0, mode)
15140 && ! general_no_elim_operand (op1, mode))
15141 op1 = copy_to_mode_reg (mode, op1);
15143 /* Force large constants in 64bit compilation into a register
15144 to get them CSEd. */
15145 if (can_create_pseudo_p ()
15146 && (mode == DImode) && TARGET_64BIT
15147 && immediate_operand (op1, mode)
15148 && !x86_64_zext_immediate_operand (op1, VOIDmode)
15149 && !register_operand (op0, mode)
15151 op1 = copy_to_mode_reg (mode, op1);
15153 if (can_create_pseudo_p ()
15154 && FLOAT_MODE_P (mode)
15155 && GET_CODE (op1) == CONST_DOUBLE)
15157 /* If we are loading a floating point constant to a register,
15158 force the value to memory now, since we'll get better code
15159 out of the back end. */
15161 op1 = validize_mem (force_const_mem (mode, op1));
15162 if (!register_operand (op0, mode))
15164 rtx temp = gen_reg_rtx (mode);
15165 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
15166 emit_move_insn (op0, temp);
15172 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
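/* Example of the large-constant case above (illustrative): in 64-bit
   code two uses of the same wide immediate, e.g.

       movabsq $0x123456789abcdef0, %rax

   cost 10 bytes each; forcing the constant into a pseudo register
   lets CSE materialize it once and reuse the register.  */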
15176 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
15178 rtx op0 = operands[0], op1 = operands[1];
15179 unsigned int align = GET_MODE_ALIGNMENT (mode);
15181 /* Force constants other than zero into memory. We do not know how
15182 the instructions used to build constants modify the upper 64 bits
15183 of the register; once we have that information we may be able
15184 to handle some of them more efficiently. */
15185 if (can_create_pseudo_p ()
15186 && register_operand (op0, mode)
15187 && (CONSTANT_P (op1)
15188 || (GET_CODE (op1) == SUBREG
15189 && CONSTANT_P (SUBREG_REG (op1))))
15190 && !standard_sse_constant_p (op1))
15191 op1 = validize_mem (force_const_mem (mode, op1));
15193 /* We need to check memory alignment for SSE mode since attributes
15194 can make operands unaligned. */
15195 if (can_create_pseudo_p ()
15196 && SSE_REG_MODE_P (mode)
15197 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
15198 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
15202 /* ix86_expand_vector_move_misalign() does not like constants ... */
15203 if (CONSTANT_P (op1)
15204 || (GET_CODE (op1) == SUBREG
15205 && CONSTANT_P (SUBREG_REG (op1))))
15206 op1 = validize_mem (force_const_mem (mode, op1));
15208 /* ... nor both arguments in memory. */
15209 if (!register_operand (op0, mode)
15210 && !register_operand (op1, mode))
15211 op1 = force_reg (mode, op1);
15213 tmp[0] = op0; tmp[1] = op1;
15214 ix86_expand_vector_move_misalign (mode, tmp);
15218 /* Make operand1 a register if it isn't already. */
15219 if (can_create_pseudo_p ()
15220 && !register_operand (op0, mode)
15221 && !register_operand (op1, mode))
15223 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
15227 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
15230 /* Split 32-byte AVX unaligned load and store if needed. */
15233 ix86_avx256_split_vector_move_misalign (rtx op0, rtx op1)
15236 rtx (*extract) (rtx, rtx, rtx);
15237 rtx (*move_unaligned) (rtx, rtx);
15238 enum machine_mode mode;
15240 switch (GET_MODE (op0))
15243 gcc_unreachable ();
15245 extract = gen_avx_vextractf128v32qi;
15246 move_unaligned = gen_avx_movdqu256;
15250 extract = gen_avx_vextractf128v8sf;
15251 move_unaligned = gen_avx_movups256;
15255 extract = gen_avx_vextractf128v4df;
15256 move_unaligned = gen_avx_movupd256;
15261 if (MEM_P (op1) && TARGET_AVX256_SPLIT_UNALIGNED_LOAD)
15263 rtx r = gen_reg_rtx (mode);
15264 m = adjust_address (op1, mode, 0);
15265 emit_move_insn (r, m);
15266 m = adjust_address (op1, mode, 16);
15267 r = gen_rtx_VEC_CONCAT (GET_MODE (op0), r, m);
15268 emit_move_insn (op0, r);
15270 else if (MEM_P (op0) && TARGET_AVX256_SPLIT_UNALIGNED_STORE)
15272 m = adjust_address (op0, mode, 0);
15273 emit_insn (extract (m, op1, const0_rtx));
15274 m = adjust_address (op0, mode, 16);
15275 emit_insn (extract (m, op1, const1_rtx));
15278 emit_insn (move_unaligned (op0, op1));
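/* An illustrative example of the split (assumed operands, AT&T syntax;
   the exact insns depend on the mode): a 32-byte unaligned load from
   (%rax) into %ymm0 becomes roughly

       vmovups     (%rax), %xmm0
       vinsertf128 $1, 16(%rax), %ymm0, %ymm0

   and the matching store uses vextractf128 for each 16-byte half.  On
   targets that set TARGET_AVX256_SPLIT_UNALIGNED_{LOAD,STORE} the two
   16-byte accesses are expected to be cheaper than one unaligned
   32-byte access.  */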
15281 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
15282 straight to ix86_expand_vector_move. */
15283 /* Code generation for scalar reg-reg moves of single and double precision data:
15284 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
15288 if (x86_sse_partial_reg_dependency == true)
15293 Code generation for scalar loads of double precision data:
15294 if (x86_sse_split_regs == true)
15295 movlpd mem, reg (gas syntax)
15299 Code generation for unaligned packed loads of single precision data
15300 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
15301 if (x86_sse_unaligned_move_optimal)
15304 if (x86_sse_partial_reg_dependency == true)
15316 Code generation for unaligned packed loads of double precision data
15317 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
15318 if (x86_sse_unaligned_move_optimal)
15321 if (x86_sse_split_regs == true)
15334 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
15343 switch (GET_MODE_CLASS (mode))
15345 case MODE_VECTOR_INT:
15347 switch (GET_MODE_SIZE (mode))
15350 /* If we're optimizing for size, movups is the smallest. */
15351 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15353 op0 = gen_lowpart (V4SFmode, op0);
15354 op1 = gen_lowpart (V4SFmode, op1);
15355 emit_insn (gen_sse_movups (op0, op1));
15358 op0 = gen_lowpart (V16QImode, op0);
15359 op1 = gen_lowpart (V16QImode, op1);
15360 emit_insn (gen_sse2_movdqu (op0, op1));
15363 op0 = gen_lowpart (V32QImode, op0);
15364 op1 = gen_lowpart (V32QImode, op1);
15365 ix86_avx256_split_vector_move_misalign (op0, op1);
15368 gcc_unreachable ();
15371 case MODE_VECTOR_FLOAT:
15372 op0 = gen_lowpart (mode, op0);
15373 op1 = gen_lowpart (mode, op1);
15378 emit_insn (gen_sse_movups (op0, op1));
15381 ix86_avx256_split_vector_move_misalign (op0, op1);
15384 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15386 op0 = gen_lowpart (V4SFmode, op0);
15387 op1 = gen_lowpart (V4SFmode, op1);
15388 emit_insn (gen_sse_movups (op0, op1));
15391 emit_insn (gen_sse2_movupd (op0, op1));
15394 ix86_avx256_split_vector_move_misalign (op0, op1);
15397 gcc_unreachable ();
15402 gcc_unreachable ();
15410 /* If we're optimizing for size, movups is the smallest. */
15411 if (optimize_insn_for_size_p ()
15412 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15414 op0 = gen_lowpart (V4SFmode, op0);
15415 op1 = gen_lowpart (V4SFmode, op1);
15416 emit_insn (gen_sse_movups (op0, op1));
15420 /* ??? If we have typed data, then it would appear that using
15421 movdqu is the only way to get unaligned data loaded with integer type instructions. */
15423 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15425 op0 = gen_lowpart (V16QImode, op0);
15426 op1 = gen_lowpart (V16QImode, op1);
15427 emit_insn (gen_sse2_movdqu (op0, op1));
15431 if (TARGET_SSE2 && mode == V2DFmode)
15435 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15437 op0 = gen_lowpart (V2DFmode, op0);
15438 op1 = gen_lowpart (V2DFmode, op1);
15439 emit_insn (gen_sse2_movupd (op0, op1));
15443 /* When SSE registers are split into halves, we can avoid
15444 writing to the top half twice. */
15445 if (TARGET_SSE_SPLIT_REGS)
15447 emit_clobber (op0);
15452 /* ??? Not sure about the best option for the Intel chips.
15453 The following would seem to satisfy; the register is
15454 entirely cleared, breaking the dependency chain. We
15455 then store to the upper half, with a dependency depth
15456 of one. A rumor has it that Intel recommends two movsd
15457 followed by an unpacklpd, but this is unconfirmed. And
15458 given that the dependency depth of the unpacklpd would
15459 still be one, I'm not sure why this would be better. */
15460 zero = CONST0_RTX (V2DFmode);
15463 m = adjust_address (op1, DFmode, 0);
15464 emit_insn (gen_sse2_loadlpd (op0, zero, m));
15465 m = adjust_address (op1, DFmode, 8);
15466 emit_insn (gen_sse2_loadhpd (op0, op0, m));
15470 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
15472 op0 = gen_lowpart (V4SFmode, op0);
15473 op1 = gen_lowpart (V4SFmode, op1);
15474 emit_insn (gen_sse_movups (op0, op1));
15478 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
15479 emit_move_insn (op0, CONST0_RTX (mode));
15481 emit_clobber (op0);
15483 if (mode != V4SFmode)
15484 op0 = gen_lowpart (V4SFmode, op0);
15485 m = adjust_address (op1, V2SFmode, 0);
15486 emit_insn (gen_sse_loadlps (op0, op0, m));
15487 m = adjust_address (op1, V2SFmode, 8);
15488 emit_insn (gen_sse_loadhps (op0, op0, m));
15491 else if (MEM_P (op0))
15493 /* If we're optimizing for size, movups is the smallest. */
15494 if (optimize_insn_for_size_p ()
15495 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
15497 op0 = gen_lowpart (V4SFmode, op0);
15498 op1 = gen_lowpart (V4SFmode, op1);
15499 emit_insn (gen_sse_movups (op0, op1));
15503 /* ??? Similar to above, only less clear because of
15504 "typeless stores". */
15505 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
15506 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15508 op0 = gen_lowpart (V16QImode, op0);
15509 op1 = gen_lowpart (V16QImode, op1);
15510 emit_insn (gen_sse2_movdqu (op0, op1));
15514 if (TARGET_SSE2 && mode == V2DFmode)
15516 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15518 op0 = gen_lowpart (V2DFmode, op0);
15519 op1 = gen_lowpart (V2DFmode, op1);
15520 emit_insn (gen_sse2_movupd (op0, op1));
15524 m = adjust_address (op0, DFmode, 0);
15525 emit_insn (gen_sse2_storelpd (m, op1));
15526 m = adjust_address (op0, DFmode, 8);
15527 emit_insn (gen_sse2_storehpd (m, op1));
15532 if (mode != V4SFmode)
15533 op1 = gen_lowpart (V4SFmode, op1);
15535 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
15537 op0 = gen_lowpart (V4SFmode, op0);
15538 emit_insn (gen_sse_movups (op0, op1));
15542 m = adjust_address (op0, V2SFmode, 0);
15543 emit_insn (gen_sse_storelps (m, op1));
15544 m = adjust_address (op0, V2SFmode, 8);
15545 emit_insn (gen_sse_storehps (m, op1));
15550 gcc_unreachable ();
15553 /* Expand a push in MODE. This is some mode for which we do not support
15554 proper push instructions, at least from the registers that we expect
15555 the value to live in. */
15558 ix86_expand_push (enum machine_mode mode, rtx x)
15562 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
15563 GEN_INT (-GET_MODE_SIZE (mode)),
15564 stack_pointer_rtx, 1, OPTAB_DIRECT);
15565 if (tmp != stack_pointer_rtx)
15566 emit_move_insn (stack_pointer_rtx, tmp);
15568 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
15570 /* When we push an operand onto stack, it has to be aligned at least
15571 at the function argument boundary. However since we don't have
15572 the argument type, we can't determine the actual argument boundary. */
15574 emit_move_insn (tmp, x);
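/* A sketch of the emitted sequence (assuming a V4SFmode value in %xmm0
   on x86_64; not generated verbatim):

       subq   $16, %rsp
       movups %xmm0, (%rsp)

   i.e. the stack pointer is adjusted by the full mode size first, then
   the value is stored through an ordinary MEM, since no real push
   instruction exists for such modes.  */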
15577 /* Helper function of ix86_fixup_binary_operands to canonicalize
15578 operand order. Returns true if the operands should be swapped. */
15581 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
15584 rtx dst = operands[0];
15585 rtx src1 = operands[1];
15586 rtx src2 = operands[2];
15588 /* If the operation is not commutative, we can't do anything. */
15589 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
15592 /* Highest priority is that src1 should match dst. */
15593 if (rtx_equal_p (dst, src1))
15595 if (rtx_equal_p (dst, src2))
15598 /* Next highest priority is that immediate constants come second. */
15599 if (immediate_operand (src2, mode))
15601 if (immediate_operand (src1, mode))
15604 /* Lowest priority is that memory references should come second. */
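/* Worked example (illustrative): for the commutative expression
   x = 8 + b, the rules above swap the operands so the immediate comes
   second, x = b + 8, matching the "op reg, imm" instruction forms; an
   operand equal to the destination is likewise moved first so the
   two-address machine instruction can tie them.  */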
15614 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
15615 destination to use for the operation. If different from the true
15616 destination in operands[0], a copy operation will be required. */
15619 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
15622 rtx dst = operands[0];
15623 rtx src1 = operands[1];
15624 rtx src2 = operands[2];
15626 /* Canonicalize operand order. */
15627 if (ix86_swap_binary_operands_p (code, mode, operands))
15631 /* It is invalid to swap operands of different modes. */
15632 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
15639 /* Both source operands cannot be in memory. */
15640 if (MEM_P (src1) && MEM_P (src2))
15642 /* Optimization: Only read from memory once. */
15643 if (rtx_equal_p (src1, src2))
15645 src2 = force_reg (mode, src2);
15649 src2 = force_reg (mode, src2);
15652 /* If the destination is memory, and we do not have matching source
15653 operands, do things in registers. */
15654 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
15655 dst = gen_reg_rtx (mode);
15657 /* Source 1 cannot be a constant. */
15658 if (CONSTANT_P (src1))
15659 src1 = force_reg (mode, src1);
15661 /* Source 1 cannot be a non-matching memory. */
15662 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
15663 src1 = force_reg (mode, src1);
15665 operands[1] = src1;
15666 operands[2] = src2;
15670 /* Similarly, but assume that the destination has already been
15671 set up properly. */
15674 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
15675 enum machine_mode mode, rtx operands[])
15677 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
15678 gcc_assert (dst == operands[0]);
15681 /* Attempt to expand a binary operator. Make the expansion closer to the
15682 actual machine, than just general_operand, which will allow 3 separate
15683 memory references (one output, two input) in a single insn. */
15686 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
15689 rtx src1, src2, dst, op, clob;
15691 dst = ix86_fixup_binary_operands (code, mode, operands);
15692 src1 = operands[1];
15693 src2 = operands[2];
15695 /* Emit the instruction. */
15697 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
15698 if (reload_in_progress)
15700 /* Reload doesn't know about the flags register, and doesn't know that
15701 it doesn't want to clobber it. We can only do this with PLUS. */
15702 gcc_assert (code == PLUS);
15705 else if (reload_completed
15707 && !rtx_equal_p (dst, src1))
15709 /* This is going to be an LEA; avoid splitting it later. */
15714 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15715 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
15718 /* Fix up the destination if needed. */
15719 if (dst != operands[0])
15720 emit_move_insn (operands[0], dst);
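/* Illustrative expansions (assumed registers, AT&T syntax):

       a = a + b   ->   addl %ebx, %eax         # dst ties src1; flags
                                                # clobber in the PARALLEL
       a = b + c   ->   leal (%rbx,%rcx), %eax  # post-reload PLUS with
                                                # dst != src1; no clobber

   PLUS is the one operation the address-generation unit can perform,
   which is why the reload_in_progress path asserts code == PLUS.  */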
15723 /* Return TRUE or FALSE depending on whether the binary operator meets the
15724 appropriate constraints. */
15727 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
15730 rtx dst = operands[0];
15731 rtx src1 = operands[1];
15732 rtx src2 = operands[2];
15734 /* Both source operands cannot be in memory. */
15735 if (MEM_P (src1) && MEM_P (src2))
15738 /* Canonicalize operand order for commutative operators. */
15739 if (ix86_swap_binary_operands_p (code, mode, operands))
15746 /* If the destination is memory, we must have a matching source operand. */
15747 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
15750 /* Source 1 cannot be a constant. */
15751 if (CONSTANT_P (src1))
15754 /* Source 1 cannot be a non-matching memory. */
15755 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
15757 /* Support "andhi/andsi/anddi" as a zero-extending move. */
15758 return (code == AND
15761 || (TARGET_64BIT && mode == DImode))
15762 && CONST_INT_P (src2)
15763 && (INTVAL (src2) == 0xff
15764 || INTVAL (src2) == 0xffff));
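/* Example of the AND special case above (illustrative):

       r = *p & 0xff;

   has a non-matching memory source, yet is accepted because it can be
   emitted as a zero-extending load such as "movzbl (%rdx), %eax"
   instead of a load followed by a masking AND.  */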
15770 /* Attempt to expand a unary operator. Make the expansion closer to the
15771 actual machine, than just general_operand, which will allow 2 separate
15772 memory references (one output, one input) in a single insn. */
15775 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
15778 int matching_memory;
15779 rtx src, dst, op, clob;
15784 /* If the destination is memory, and we do not have matching source
15785 operands, do things in registers. */
15786 matching_memory = 0;
15789 if (rtx_equal_p (dst, src))
15790 matching_memory = 1;
15792 dst = gen_reg_rtx (mode);
15795 /* When source operand is memory, destination must match. */
15796 if (MEM_P (src) && !matching_memory)
15797 src = force_reg (mode, src);
15799 /* Emit the instruction. */
15801 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
15802 if (reload_in_progress || code == NOT)
15804 /* Reload doesn't know about the flags register, and doesn't know that
15805 it doesn't want to clobber it. */
15806 gcc_assert (code == NOT);
15811 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
15812 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
15815 /* Fix up the destination if needed. */
15816 if (dst != operands[0])
15817 emit_move_insn (operands[0], dst);
15820 /* Split 32bit/64bit divmod with 8bit unsigned divmod if dividend and
15821 divisor are within the range [0-255]. */
15824 ix86_split_idivmod (enum machine_mode mode, rtx operands[],
15827 rtx end_label, qimode_label;
15828 rtx insn, div, mod;
15829 rtx scratch, tmp0, tmp1, tmp2;
15830 rtx (*gen_divmod4_1) (rtx, rtx, rtx, rtx);
15831 rtx (*gen_zero_extend) (rtx, rtx);
15832 rtx (*gen_test_ccno_1) (rtx, rtx);
15837 gen_divmod4_1 = signed_p ? gen_divmodsi4_1 : gen_udivmodsi4_1;
15838 gen_test_ccno_1 = gen_testsi_ccno_1;
15839 gen_zero_extend = gen_zero_extendqisi2;
15842 gen_divmod4_1 = signed_p ? gen_divmoddi4_1 : gen_udivmoddi4_1;
15843 gen_test_ccno_1 = gen_testdi_ccno_1;
15844 gen_zero_extend = gen_zero_extendqidi2;
15847 gcc_unreachable ();
15850 end_label = gen_label_rtx ();
15851 qimode_label = gen_label_rtx ();
15853 scratch = gen_reg_rtx (mode);
15855 /* Use 8bit unsigned divmod if dividend and divisor are within
15856 the range [0-255]. */
15857 emit_move_insn (scratch, operands[2]);
15858 scratch = expand_simple_binop (mode, IOR, scratch, operands[3],
15859 scratch, 1, OPTAB_DIRECT);
15860 emit_insn (gen_test_ccno_1 (scratch, GEN_INT (-0x100)));
15861 tmp0 = gen_rtx_REG (CCNOmode, FLAGS_REG);
15862 tmp0 = gen_rtx_EQ (VOIDmode, tmp0, const0_rtx);
15863 tmp0 = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp0,
15864 gen_rtx_LABEL_REF (VOIDmode, qimode_label),
15866 insn = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp0));
15867 predict_jump (REG_BR_PROB_BASE * 50 / 100);
15868 JUMP_LABEL (insn) = qimode_label;
15870 /* Generate original signed/unsigned divmod. */
15871 div = gen_divmod4_1 (operands[0], operands[1],
15872 operands[2], operands[3]);
15875 /* Branch to the end. */
15876 emit_jump_insn (gen_jump (end_label));
15879 /* Generate 8bit unsigned divide. */
15880 emit_label (qimode_label);
15881 /* Don't use operands[0] for result of 8bit divide since not all
15882 registers support QImode ZERO_EXTRACT. */
15883 tmp0 = simplify_gen_subreg (HImode, scratch, mode, 0);
15884 tmp1 = simplify_gen_subreg (HImode, operands[2], mode, 0);
15885 tmp2 = simplify_gen_subreg (QImode, operands[3], mode, 0);
15886 emit_insn (gen_udivmodhiqi3 (tmp0, tmp1, tmp2));
15890 div = gen_rtx_DIV (SImode, operands[2], operands[3]);
15891 mod = gen_rtx_MOD (SImode, operands[2], operands[3]);
15895 div = gen_rtx_UDIV (SImode, operands[2], operands[3]);
15896 mod = gen_rtx_UMOD (SImode, operands[2], operands[3]);
15899 /* Extract remainder from AH. */
15900 tmp1 = gen_rtx_ZERO_EXTRACT (mode, tmp0, GEN_INT (8), GEN_INT (8));
15901 if (REG_P (operands[1]))
15902 insn = emit_move_insn (operands[1], tmp1);
15905 /* Need a new scratch register since the old one has the result of the 8bit divide. */
15907 scratch = gen_reg_rtx (mode);
15908 emit_move_insn (scratch, tmp1);
15909 insn = emit_move_insn (operands[1], scratch);
15911 set_unique_reg_note (insn, REG_EQUAL, mod);
15913 /* Zero extend quotient from AL. */
15914 tmp1 = gen_lowpart (QImode, tmp0);
15915 insn = emit_insn (gen_zero_extend (operands[0], tmp1));
15916 set_unique_reg_note (insn, REG_EQUAL, div);
15918 emit_label (end_label);
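/* A sketch of the split sequence for the SImode case (assumed
   registers, AT&T syntax; not emitted verbatim):

       movl   %edi, %ecx        # scratch = dividend (assumed %edi)
       orl    %esi, %ecx        # scratch |= divisor (assumed %esi)
       testl  $-256, %ecx       # any bits above the low 8?
       jne    .Lfull
       divb   ...               # AL = quotient, AH = remainder
       jmp    .Ldone
   .Lfull:
       cltd                     # original 32-bit (i)div
       idivl  ...
   .Ldone:

   When both values fit in 8 bits they are non-negative, so the cheap
   unsigned divb is correct even for the signed case.  */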
15921 #define LEA_SEARCH_THRESHOLD 12
15923 /* Search backward for non-agu definition of register number REGNO1
15924 or register number REGNO2 in INSN's basic block until
15925 1. Pass LEA_SEARCH_THRESHOLD instructions, or
15926 2. Reach BB boundary, or
15927 3. Reach agu definition.
15928 Returns the distance between the non-agu definition point and INSN.
15929 If no definition point, returns -1. */
15932 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
15935 basic_block bb = BLOCK_FOR_INSN (insn);
15938 enum attr_type insn_type;
15940 if (insn != BB_HEAD (bb))
15942 rtx prev = PREV_INSN (insn);
15943 while (prev && distance < LEA_SEARCH_THRESHOLD)
15945 if (NONDEBUG_INSN_P (prev))
15948 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
15949 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
15950 && !DF_REF_IS_ARTIFICIAL (*def_rec)
15951 && (regno1 == DF_REF_REGNO (*def_rec)
15952 || regno2 == DF_REF_REGNO (*def_rec)))
15954 insn_type = get_attr_type (prev);
15955 if (insn_type != TYPE_LEA)
15959 if (prev == BB_HEAD (bb))
15961 prev = PREV_INSN (prev);
15965 if (distance < LEA_SEARCH_THRESHOLD)
15969 bool simple_loop = false;
15971 FOR_EACH_EDGE (e, ei, bb->preds)
15974 simple_loop = true;
15980 rtx prev = BB_END (bb);
15983 && distance < LEA_SEARCH_THRESHOLD)
15985 if (NONDEBUG_INSN_P (prev))
15988 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
15989 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
15990 && !DF_REF_IS_ARTIFICIAL (*def_rec)
15991 && (regno1 == DF_REF_REGNO (*def_rec)
15992 || regno2 == DF_REF_REGNO (*def_rec)))
15994 insn_type = get_attr_type (prev);
15995 if (insn_type != TYPE_LEA)
15999 prev = PREV_INSN (prev);
16007 /* get_attr_type may modify recog data. We want to make sure
16008 that recog data is valid for instruction INSN, on which
16009 distance_non_agu_define is called. INSN is unchanged here. */
16010 extract_insn_cached (insn);
16014 /* Return the distance between INSN and the next insn that uses
16015 register number REGNO0 in a memory address. Return -1 if no such
16016 use is found within LEA_SEARCH_THRESHOLD or REGNO0 is set. */
16019 distance_agu_use (unsigned int regno0, rtx insn)
16021 basic_block bb = BLOCK_FOR_INSN (insn);
16026 if (insn != BB_END (bb))
16028 rtx next = NEXT_INSN (insn);
16029 while (next && distance < LEA_SEARCH_THRESHOLD)
16031 if (NONDEBUG_INSN_P (next))
16035 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16036 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16037 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16038 && regno0 == DF_REF_REGNO (*use_rec))
16040 /* Return DISTANCE if OP0 is used in memory
16041 address in NEXT. */
16045 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16046 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16047 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16048 && regno0 == DF_REF_REGNO (*def_rec))
16050 /* Return -1 if OP0 is set in NEXT. */
16054 if (next == BB_END (bb))
16056 next = NEXT_INSN (next);
16060 if (distance < LEA_SEARCH_THRESHOLD)
16064 bool simple_loop = false;
16066 FOR_EACH_EDGE (e, ei, bb->succs)
16069 simple_loop = true;
16075 rtx next = BB_HEAD (bb);
16078 && distance < LEA_SEARCH_THRESHOLD)
16080 if (NONDEBUG_INSN_P (next))
16084 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
16085 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
16086 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
16087 && regno0 == DF_REF_REGNO (*use_rec))
16089 /* Return DISTANCE if OP0 is used in memory
16090 address in NEXT. */
16094 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
16095 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
16096 && !DF_REF_IS_ARTIFICIAL (*def_rec)
16097 && regno0 == DF_REF_REGNO (*def_rec))
16099 /* Return -1 if OP0 is set in NEXT. */
16104 next = NEXT_INSN (next);
16112 /* Define this macro to tune LEA priority vs ADD; it takes effect when
16113 there is a dilemma of choosing LEA or ADD.
16114 Negative value: ADD is preferred over LEA.
16116 Positive value: LEA is preferred over ADD. */
16117 #define IX86_LEA_PRIORITY 2
16119 /* Return true if it is ok to optimize an ADD operation to an LEA
16120 operation to avoid flag register consumption. For most processors,
16121 ADD is faster than LEA. For processors like ATOM, if the
16122 destination register of the LEA holds an actual address which will be
16123 used soon, LEA is better; otherwise ADD is better. */
16126 ix86_lea_for_add_ok (rtx insn, rtx operands[])
16128 unsigned int regno0 = true_regnum (operands[0]);
16129 unsigned int regno1 = true_regnum (operands[1]);
16130 unsigned int regno2 = true_regnum (operands[2]);
16132 /* If a = b + c, (a!=b && a!=c), we must use the lea form. */
16133 if (regno0 != regno1 && regno0 != regno2)
16136 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
16140 int dist_define, dist_use;
16142 /* Return false if REGNO0 isn't used in memory address. */
16143 dist_use = distance_agu_use (regno0, insn);
16147 dist_define = distance_non_agu_define (regno1, regno2, insn);
16148 if (dist_define <= 0)
16151 /* If this insn has both backward non-agu dependence and forward
16152 agu dependence, the one with the shorter distance takes effect. */
16153 if ((dist_define + IX86_LEA_PRIORITY) < dist_use)
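/* Worked example of the heuristic (illustrative): with
   IX86_LEA_PRIORITY == 2, if the inputs were defined by a non-AGU insn
   1 insn back (dist_define == 1) and the result feeds a memory address
   5 insns ahead (dist_use == 5), then 1 + 2 < 5 and the plain ADD is
   kept.  Were the address use only 2 insns away, LEA would win: on
   ATOM its result is produced in the AGU, roughly avoiding an
   ALU-to-AGU forwarding stall at the address use.  */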
16160 /* Return true if destination reg of SET_BODY is shift count of USE_BODY. */
16164 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
16170 /* Retrieve destination of SET_BODY. */
16171 switch (GET_CODE (set_body))
16174 set_dest = SET_DEST (set_body);
16175 if (!set_dest || !REG_P (set_dest))
16179 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
16180 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
16188 /* Retrieve shift count of USE_BODY. */
16189 switch (GET_CODE (use_body))
16192 shift_rtx = XEXP (use_body, 1);
16195 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
16196 if (ix86_dep_by_shift_count_body (set_body,
16197 XVECEXP (use_body, 0, i)))
16205 && (GET_CODE (shift_rtx) == ASHIFT
16206 || GET_CODE (shift_rtx) == LSHIFTRT
16207 || GET_CODE (shift_rtx) == ASHIFTRT
16208 || GET_CODE (shift_rtx) == ROTATE
16209 || GET_CODE (shift_rtx) == ROTATERT))
16211 rtx shift_count = XEXP (shift_rtx, 1);
16213 /* Return true if shift count is dest of SET_BODY. */
16214 if (REG_P (shift_count)
16215 && true_regnum (set_dest) == true_regnum (shift_count))
16222 /* Return true if destination reg of SET_INSN is shift count of USE_INSN. */
16226 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
16228 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
16229 PATTERN (use_insn));
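/* Example of the dependence this detects (illustrative):

       movl %eax, %ecx      # SET_INSN writes %ecx
       sall %cl,  %edx      # USE_INSN's shift count lives in %cl

   The destination of the first insn is the shift count of the second,
   so the function returns true.  */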
16232 /* Return TRUE or FALSE depending on whether the unary operator meets the
16233 appropriate constraints. */
16236 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
16237 enum machine_mode mode ATTRIBUTE_UNUSED,
16238 rtx operands[2] ATTRIBUTE_UNUSED)
16240 /* If one of the operands is memory, source and destination must match. */
16241 if ((MEM_P (operands[0])
16242 || MEM_P (operands[1]))
16243 && ! rtx_equal_p (operands[0], operands[1]))
16248 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
16249 are ok, keeping in mind the possible movddup alternative. */
16252 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
16254 if (MEM_P (operands[0]))
16255 return rtx_equal_p (operands[0], operands[1 + high]);
16256 if (MEM_P (operands[1]) && MEM_P (operands[2]))
16257 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
16261 /* Post-reload splitter for converting an SF or DFmode value in an
16262 SSE register into an unsigned SImode. */
16265 ix86_split_convert_uns_si_sse (rtx operands[])
16267 enum machine_mode vecmode;
16268 rtx value, large, zero_or_two31, input, two31, x;
16270 large = operands[1];
16271 zero_or_two31 = operands[2];
16272 input = operands[3];
16273 two31 = operands[4];
16274 vecmode = GET_MODE (large);
16275 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
16277 /* Load up the value into the low element. We must ensure that the other
16278 elements are valid floats -- zero is the easiest such value. */
16281 if (vecmode == V4SFmode)
16282 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
16284 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
16288 input = gen_rtx_REG (vecmode, REGNO (input));
16289 emit_move_insn (value, CONST0_RTX (vecmode));
16290 if (vecmode == V4SFmode)
16291 emit_insn (gen_sse_movss (value, value, input));
16293 emit_insn (gen_sse2_movsd (value, value, input));
16296 emit_move_insn (large, two31);
16297 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
16299 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
16300 emit_insn (gen_rtx_SET (VOIDmode, large, x));
16302 x = gen_rtx_AND (vecmode, zero_or_two31, large);
16303 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
16305 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
16306 emit_insn (gen_rtx_SET (VOIDmode, value, x));
16308 large = gen_rtx_REG (V4SImode, REGNO (large));
16309 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
16311 x = gen_rtx_REG (V4SImode, REGNO (value));
16312 if (vecmode == V4SFmode)
16313 emit_insn (gen_sse2_cvttps2dq (x, value));
16315 emit_insn (gen_sse2_cvttpd2dq (x, value));
16318 emit_insn (gen_xorv4si3 (value, value, large));
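/* Worked example of the branch-free conversion (illustrative, DFmode
   input 3000000000.0, which exceeds INT_MAX):

     large         = (two31 <= value)          -> all-ones mask
     zero_or_two31 = mask ? 2147483648.0 : 0.0 -> 2147483648.0
     value        -= zero_or_two31             -> 852516352.0
     cvttpd2dq                                 -> 0x32d05e00
     mask << 31    = 0x80000000; xor           -> 0xb2d05e00 == 3000000000u

   For inputs below 2^31 the mask is zero and the subtraction and the
   final xor are no-ops.  */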
16321 /* Convert an unsigned DImode value into a DFmode, using only SSE.
16322 Expects the 64-bit DImode to be supplied in a pair of integral
16323 registers. Requires SSE2; will use SSE3 if available. For x86_32,
16324 -mfpmath=sse, !optimize_size only. */
16327 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
16329 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
16330 rtx int_xmm, fp_xmm;
16331 rtx biases, exponents;
16334 int_xmm = gen_reg_rtx (V4SImode);
16335 if (TARGET_INTER_UNIT_MOVES)
16336 emit_insn (gen_movdi_to_sse (int_xmm, input));
16337 else if (TARGET_SSE_SPLIT_REGS)
16339 emit_clobber (int_xmm);
16340 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
16344 x = gen_reg_rtx (V2DImode);
16345 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
16346 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
16349 x = gen_rtx_CONST_VECTOR (V4SImode,
16350 gen_rtvec (4, GEN_INT (0x43300000UL),
16351 GEN_INT (0x45300000UL),
16352 const0_rtx, const0_rtx));
16353 exponents = validize_mem (force_const_mem (V4SImode, x));
16355 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
16356 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
16358 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
16359 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
16360 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
16361 (0x1.0p84 + double(fp_value_hi_xmm)).
16362 Note these exponents differ by 32. */
16364 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
16366 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
16367 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
16368 real_ldexp (&bias_lo_rvt, &dconst1, 52);
16369 real_ldexp (&bias_hi_rvt, &dconst1, 84);
16370 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
16371 x = const_double_from_real_value (bias_hi_rvt, DFmode);
16372 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
16373 biases = validize_mem (force_const_mem (V2DFmode, biases));
16374 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
16376 /* Add the upper and lower DFmode values together. */
16378 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
16381 x = copy_to_mode_reg (V2DFmode, fp_xmm);
16382 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
16383 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
16386 ix86_expand_vector_extract (false, target, fp_xmm, 0);
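/* Worked example of the exponent-bias trick (illustrative): for the
   input 42 * 2^32 + 5 the interleave builds the two doubles

       0x43300000_00000005 = 0x1.0p52 + 5.0
       0x45300000_0000002a = 0x1.0p84 + 42.0 * 0x1.0p32

   because the ulp at exponent 52 is 1.0 and at 84 is 2^32.
   Subtracting the 0x1.0p52/0x1.0p84 biases leaves 5.0 and 42.0 * 2^32
   exactly, and the final add yields 180388626437.0.  */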
16389 /* Not used, but eases macroization of patterns. */
16391 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
16392 rtx input ATTRIBUTE_UNUSED)
16394 gcc_unreachable ();
16397 /* Convert an unsigned SImode value into a DFmode. Only currently used
16398 for SSE, but applicable anywhere. */
16401 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
16403 REAL_VALUE_TYPE TWO31r;
16406 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
16407 NULL, 1, OPTAB_DIRECT);
16409 fp = gen_reg_rtx (DFmode);
16410 emit_insn (gen_floatsidf2 (fp, x));
16412 real_ldexp (&TWO31r, &dconst1, 31);
16413 x = const_double_from_real_value (TWO31r, DFmode);
16415 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
16417 emit_move_insn (target, x);
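/* Worked example (illustrative): for input 0xffffffff (4294967295):

     x  = input + (-2147483647 - 1)   wraps to the signed value 2147483647
     fp = (double) x                  = 2147483647.0, exact
     fp + 0x1.0p31                    = 4294967295.0

   Biasing by 2^31 maps the unsigned range [0, 2^32) onto the signed
   range handled by the ordinary conversion, then adds the bias back.  */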
16420 /* Convert a signed DImode value into a DFmode. Only used for SSE in
16421 32-bit mode; otherwise we have a direct convert instruction. */
16424 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
16426 REAL_VALUE_TYPE TWO32r;
16427 rtx fp_lo, fp_hi, x;
16429 fp_lo = gen_reg_rtx (DFmode);
16430 fp_hi = gen_reg_rtx (DFmode);
16432 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
16434 real_ldexp (&TWO32r, &dconst1, 32);
16435 x = const_double_from_real_value (TWO32r, DFmode);
16436 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
16438 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
16440 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
16443 emit_move_insn (target, x);
16446 /* Convert an unsigned SImode value into a SFmode, using only SSE.
16447 For x86_32, -mfpmath=sse, !optimize_size only. */
16449 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
16451 REAL_VALUE_TYPE ONE16r;
16452 rtx fp_hi, fp_lo, int_hi, int_lo, x;
16454 real_ldexp (&ONE16r, &dconst1, 16);
16455 x = const_double_from_real_value (ONE16r, SFmode);
16456 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
16457 NULL, 0, OPTAB_DIRECT);
16458 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
16459 NULL, 0, OPTAB_DIRECT);
16460 fp_hi = gen_reg_rtx (SFmode);
16461 fp_lo = gen_reg_rtx (SFmode);
16462 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
16463 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
16464 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
16466 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
16468 if (!rtx_equal_p (target, fp_hi))
16469 emit_move_insn (target, fp_hi);
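/* Worked example (illustrative): for input 0x90001234:

     int_lo = 0x1234 -> fp_lo = 4660.0f
     int_hi = 0x9000 -> fp_hi = 36864.0f, * 0x1.0p16 = 2415919104.0f
     result = fp_hi + fp_lo

   Each 16-bit half converts to float exactly; only the final multiply
   and add can round.  */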
16472 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
16473 then replicate the value for all elements of the vector register. */
16477 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
16484 v = gen_rtvec (4, value, value, value, value);
16485 return gen_rtx_CONST_VECTOR (V4SImode, v);
16489 v = gen_rtvec (2, value, value);
16490 return gen_rtx_CONST_VECTOR (V2DImode, v);
16494 v = gen_rtvec (8, value, value, value, value,
16495 value, value, value, value);
16497 v = gen_rtvec (8, value, CONST0_RTX (SFmode),
16498 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16499 CONST0_RTX (SFmode), CONST0_RTX (SFmode),
16500 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16501 return gen_rtx_CONST_VECTOR (V8SFmode, v);
16505 v = gen_rtvec (4, value, value, value, value);
16507 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
16508 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
16509 return gen_rtx_CONST_VECTOR (V4SFmode, v);
16513 v = gen_rtvec (4, value, value, value, value);
16515 v = gen_rtvec (4, value, CONST0_RTX (DFmode),
16516 CONST0_RTX (DFmode), CONST0_RTX (DFmode));
16517 return gen_rtx_CONST_VECTOR (V4DFmode, v);
16521 v = gen_rtvec (2, value, value);
16523 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
16524 return gen_rtx_CONST_VECTOR (V2DFmode, v);
16527 gcc_unreachable ();
16531 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
16532 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
16533 for an SSE register. If VECT is true, then replicate the mask for
16534 all elements of the vector register. If INVERT is true, then create
16535 a mask excluding the sign bit. */
16538 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
16540 enum machine_mode vec_mode, imode;
16541 HOST_WIDE_INT hi, lo;
16546 /* Find the sign bit, sign extended to 2*HWI. */
16553 mode = GET_MODE_INNER (mode);
16555 lo = 0x80000000, hi = lo < 0;
16562 mode = GET_MODE_INNER (mode);
16564 if (HOST_BITS_PER_WIDE_INT >= 64)
16565 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
16567 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
16572 vec_mode = VOIDmode;
16573 if (HOST_BITS_PER_WIDE_INT >= 64)
16576 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
16583 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
16587 lo = ~lo, hi = ~hi;
16593 mask = immed_double_const (lo, hi, imode);
16595 vec = gen_rtvec (2, v, mask);
16596 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
16597 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
16604 gcc_unreachable ();
16608 lo = ~lo, hi = ~hi;
16610 /* Force this value into the low part of a fp vector constant. */
16611 mask = immed_double_const (lo, hi, imode);
16612 mask = gen_lowpart (mode, mask);
16614 if (vec_mode == VOIDmode)
16615 return force_reg (mode, mask);
16617 v = ix86_build_const_vector (vec_mode, vect, mask);
16618 return force_reg (vec_mode, v);
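/* Example masks produced (illustrative): for DFmode the sign bit is
   bit 63, so

     vect, !invert : { 0x8000000000000000, 0x8000000000000000, ... }
     vect,  invert : { 0x7fffffffffffffff, 0x7fffffffffffffff, ... }

   while for SFmode each element is 0x80000000 (0x7fffffff inverted);
   the !vect forms zero every element but the first.  */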
16621 /* Generate code for floating point ABS or NEG. */
16624 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
16627 rtx mask, set, dst, src;
16628 bool use_sse = false;
16629 bool vector_mode = VECTOR_MODE_P (mode);
16630 enum machine_mode vmode = mode;
16634 else if (mode == TFmode)
16636 else if (TARGET_SSE_MATH)
16638 use_sse = SSE_FLOAT_MODE_P (mode);
16639 if (mode == SFmode)
16641 else if (mode == DFmode)
16645 /* NEG and ABS performed with SSE use bitwise mask operations.
16646 Create the appropriate mask now. */
16648 mask = ix86_build_signbit_mask (vmode, vector_mode, code == ABS);
16655 set = gen_rtx_fmt_e (code, mode, src);
16656 set = gen_rtx_SET (VOIDmode, dst, set);
16663 use = gen_rtx_USE (VOIDmode, mask);
16665 par = gen_rtvec (2, set, use);
16668 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
16669 par = gen_rtvec (3, set, use, clob);
16671 emit_insn (gen_rtx_PARALLEL (VOIDmode, par));
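/* The result is a single mask instruction (illustrative, SFmode value
   in %xmm0; "signmask"/"notmask" are hypothetical labels):

       NEG:  xorps signmask(%rip), %xmm0    # flip bit 31
       ABS:  andps notmask(%rip), %xmm0     # clear bit 31

   where signmask is 0x80000000 replicated and notmask its complement,
   which is why the ABS case requests the inverted mask above.  */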
16677 /* Expand a copysign operation. Special case operand 0 being a constant. */
16680 ix86_expand_copysign (rtx operands[])
16682 enum machine_mode mode, vmode;
16683 rtx dest, op0, op1, mask, nmask;
16685 dest = operands[0];
16689 mode = GET_MODE (dest);
16691 if (mode == SFmode)
16693 else if (mode == DFmode)
16698 if (GET_CODE (op0) == CONST_DOUBLE)
16700 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
16702 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
16703 op0 = simplify_unary_operation (ABS, mode, op0, mode);
16705 if (mode == SFmode || mode == DFmode)
16707 if (op0 == CONST0_RTX (mode))
16708 op0 = CONST0_RTX (vmode);
16711 rtx v = ix86_build_const_vector (vmode, false, op0);
16713 op0 = force_reg (vmode, v);
16716 else if (op0 != CONST0_RTX (mode))
16717 op0 = force_reg (mode, op0);
16719 mask = ix86_build_signbit_mask (vmode, 0, 0);
16721 if (mode == SFmode)
16722 copysign_insn = gen_copysignsf3_const;
16723 else if (mode == DFmode)
16724 copysign_insn = gen_copysigndf3_const;
16726 copysign_insn = gen_copysigntf3_const;
16728 emit_insn (copysign_insn (dest, op0, op1, mask));
16732 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
16734 nmask = ix86_build_signbit_mask (vmode, 0, 1);
16735 mask = ix86_build_signbit_mask (vmode, 0, 0);
16737 if (mode == SFmode)
16738 copysign_insn = gen_copysignsf3_var;
16739 else if (mode == DFmode)
16740 copysign_insn = gen_copysigndf3_var;
16742 copysign_insn = gen_copysigntf3_var;
16744 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
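/* Sketch of what the _var pattern later splits into (illustrative,
   SFmode, assumed temporaries):

       andps  mask, tmp1      # tmp1 = sign bit of op1
       andnps op0, tmp0       # tmp0 = magnitude of op0
       orps   tmp1, tmp0      # copysign (op0, op1)

   The constant case folds |op0| at compile time, so only op1's sign
   needs masking: one AND plus, for nonzero op0, an OR with the
   constant magnitude vector.  */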
16748 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
16749 be a constant, and so has already been expanded into a vector constant. */
16752 ix86_split_copysign_const (rtx operands[])
16754 enum machine_mode mode, vmode;
16755 rtx dest, op0, mask, x;
16757 dest = operands[0];
16759 mask = operands[3];
16761 mode = GET_MODE (dest);
16762 vmode = GET_MODE (mask);
16764 dest = simplify_gen_subreg (vmode, dest, mode, 0);
16765 x = gen_rtx_AND (vmode, dest, mask);
16766 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16768 if (op0 != CONST0_RTX (vmode))
16770 x = gen_rtx_IOR (vmode, dest, op0);
16771 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16775 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
16776 so we have to do two masks. */
16779 ix86_split_copysign_var (rtx operands[])
16781 enum machine_mode mode, vmode;
16782 rtx dest, scratch, op0, op1, mask, nmask, x;
16784 dest = operands[0];
16785 scratch = operands[1];
16788 nmask = operands[4];
16789 mask = operands[5];
16791 mode = GET_MODE (dest);
16792 vmode = GET_MODE (mask);
16794 if (rtx_equal_p (op0, op1))
16796 /* Shouldn't happen often (it's useless, obviously), but when it does
16797 we'd generate incorrect code if we continue below. */
16798 emit_move_insn (dest, op0);
16802 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
16804 gcc_assert (REGNO (op1) == REGNO (scratch));
16806 x = gen_rtx_AND (vmode, scratch, mask);
16807 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
16810 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
16811 x = gen_rtx_NOT (vmode, dest);
16812 x = gen_rtx_AND (vmode, x, op0);
16813 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16817 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
16819 x = gen_rtx_AND (vmode, scratch, mask);
16821 else /* alternative 2,4 */
16823 gcc_assert (REGNO (mask) == REGNO (scratch));
16824 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
16825 x = gen_rtx_AND (vmode, scratch, op1);
16827 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
16829 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
16831 dest = simplify_gen_subreg (vmode, op0, mode, 0);
16832 x = gen_rtx_AND (vmode, dest, nmask);
16834 else /* alternative 3,4 */
16836 gcc_assert (REGNO (nmask) == REGNO (dest));
16838 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
16839 x = gen_rtx_AND (vmode, dest, op0);
16841 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16844 x = gen_rtx_IOR (vmode, dest, scratch);
16845 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16848 /* Return TRUE or FALSE depending on whether the first SET in INSN
16849 has source and destination with matching CC modes, and that the
16850 CC mode is at least as constrained as REQ_MODE. */
16853 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
16856 enum machine_mode set_mode;
16858 set = PATTERN (insn);
16859 if (GET_CODE (set) == PARALLEL)
16860 set = XVECEXP (set, 0, 0);
16861 gcc_assert (GET_CODE (set) == SET);
16862 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
16864 set_mode = GET_MODE (SET_DEST (set));
16868 if (req_mode != CCNOmode
16869 && (req_mode != CCmode
16870 || XEXP (SET_SRC (set), 1) != const0_rtx))
16874 if (req_mode == CCGCmode)
16878 if (req_mode == CCGOCmode || req_mode == CCNOmode)
16882 if (req_mode == CCZmode)
16892 if (set_mode != req_mode)
16897 gcc_unreachable ();
16900 return GET_MODE (SET_SRC (set)) == set_mode;
16903 /* Generate insn patterns to do an integer compare of OPERANDS. */
16906 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
16908 enum machine_mode cmpmode;
16911 cmpmode = SELECT_CC_MODE (code, op0, op1);
16912 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
16914 /* This is very simple, but making the interface the same as in the
16915 FP case makes the rest of the code easier. */
16916 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
16917 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
16919 /* Return the test that should be put into the flags user, i.e.
16920 the bcc, scc, or cmov instruction. */
16921 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
16924 /* Figure out whether to use ordered or unordered fp comparisons.
16925 Return the appropriate mode to use. */
16928 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
16930 /* ??? In order to make all comparisons reversible, we do all comparisons
16931 non-trapping when compiling for IEEE. Once gcc is able to distinguish
16932 between trapping and nontrapping comparisons, we can make inequality
16933 comparisons trapping again, since it results in better code when using
16934 FCOM based compares. */
16935 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
16939 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
16941 enum machine_mode mode = GET_MODE (op0);
16943 if (SCALAR_FLOAT_MODE_P (mode))
16945 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
16946 return ix86_fp_compare_mode (code);
16951 /* Only zero flag is needed. */
16952 case EQ: /* ZF=0 */
16953 case NE: /* ZF!=0 */
16955 /* Codes needing carry flag. */
16956 case GEU: /* CF=0 */
16957 case LTU: /* CF=1 */
16958 /* Detect overflow checks. They need just the carry flag. */
16959 if (GET_CODE (op0) == PLUS
16960 && rtx_equal_p (op1, XEXP (op0, 0)))
16964 case GTU: /* CF=0 & ZF=0 */
16965 case LEU: /* CF=1 | ZF=1 */
16966 /* Detect overflow checks. They need just the carry flag. */
16967 if (GET_CODE (op0) == MINUS
16968 && rtx_equal_p (op1, XEXP (op0, 0)))
16972 /* Codes possibly doable only with sign flag when
16973 comparing against zero. */
16974 case GE: /* SF=OF or SF=0 */
16975 case LT: /* SF<>OF or SF=1 */
16976 if (op1 == const0_rtx)
16979 /* For other cases the carry flag is not required. */
16981 /* Codes doable only with sign flag when comparing
16982 against zero, but we lack a jump instruction for it,
16983 so we need to use relational tests against overflow,
16984 which thus needs to be zero. */
16985 case GT: /* ZF=0 & SF=OF */
16986 case LE: /* ZF=1 | SF<>OF */
16987 if (op1 == const0_rtx)
16991 /* strcmp patterns do (use flags) and combine may ask us for a proper mode. */
16996 gcc_unreachable ();
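/* Example of the overflow-check cases above (illustrative): the
   unsigned overflow test

       if (a + b < a)  ...

   compares (plus a b) against a with LTU; only the carry flag is
   needed, so the compare can later be folded into the addition itself,
   e.g. "addl %ebx, %eax; jc ...".  */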
17000 /* Return the fixed registers used for condition codes. */
17003 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
17010 /* If two condition code modes are compatible, return a condition code
17011 mode which is compatible with both. Otherwise, return
17014 static enum machine_mode
17015 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
17020 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
17023 if ((m1 == CCGCmode && m2 == CCGOCmode)
17024 || (m1 == CCGOCmode && m2 == CCGCmode))
17030 gcc_unreachable ();
17060 /* These are only compatible with themselves, which we already know. */
17067 /* Return a comparison we can do that is equivalent to
17068 swap_condition (code), except possibly for orderedness.
17069 But never change orderedness if TARGET_IEEE_FP, returning
17070 UNKNOWN in that case if necessary. */
17072 static enum rtx_code
17073 ix86_fp_swap_condition (enum rtx_code code)
17077 case GT: /* GTU - CF=0 & ZF=0 */
17078 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
17079 case GE: /* GEU - CF=0 */
17080 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
17081 case UNLT: /* LTU - CF=1 */
17082 return TARGET_IEEE_FP ? UNKNOWN : GT;
17083 case UNLE: /* LEU - CF=1 | ZF=1 */
17084 return TARGET_IEEE_FP ? UNKNOWN : GE;
17086 return swap_condition (code);
17090 /* Return the cost of comparison CODE using the best strategy for performance.
17091 All of the following functions use the number of instructions as a cost metric.
17092 In the future this should be tweaked to compute bytes for optimize_size and
17093 to take into account the performance of various instructions on various CPUs. */
17096 ix86_fp_comparison_cost (enum rtx_code code)
17100 /* The cost of code using bit-twiddling on %ah. */
17117 arith_cost = TARGET_IEEE_FP ? 5 : 4;
17121 arith_cost = TARGET_IEEE_FP ? 6 : 4;
17124 gcc_unreachable ();
17127 switch (ix86_fp_comparison_strategy (code))
17129 case IX86_FPCMP_COMI:
17130 return arith_cost > 4 ? 3 : 2;
17131 case IX86_FPCMP_SAHF:
17132 return arith_cost > 4 ? 4 : 3;
17138 /* Return strategy to use for floating-point. We assume that fcomi is always
17139 preferable where available, since that is also true when looking at size
17140 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
17142 enum ix86_fpcmp_strategy
17143 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
17145 /* Do fcomi/sahf based test when profitable. */
17148 return IX86_FPCMP_COMI;
17150 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
17151 return IX86_FPCMP_SAHF;
17153 return IX86_FPCMP_ARITH;
17156 /* Swap, force into registers, or otherwise massage the two operands
17157 to a fp comparison. The operands are updated in place; the new
17158 comparison code is returned. */
17160 static enum rtx_code
17161 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
17163 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
17164 rtx op0 = *pop0, op1 = *pop1;
17165 enum machine_mode op_mode = GET_MODE (op0);
17166 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
17168 /* All of the unordered compare instructions only work on registers.
17169 The same is true of the fcomi compare instructions. The XFmode
17170 compare instructions require registers except when comparing
17171 against zero or when converting operand 1 from fixed point to floating point. */
17175 && (fpcmp_mode == CCFPUmode
17176 || (op_mode == XFmode
17177 && ! (standard_80387_constant_p (op0) == 1
17178 || standard_80387_constant_p (op1) == 1)
17179 && GET_CODE (op1) != FLOAT)
17180 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
17182 op0 = force_reg (op_mode, op0);
17183 op1 = force_reg (op_mode, op1);
17187 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
17188 things around if they appear profitable, otherwise force op0
17189 into a register. */
17191 if (standard_80387_constant_p (op0) == 0
17193 && ! (standard_80387_constant_p (op1) == 0
17196 enum rtx_code new_code = ix86_fp_swap_condition (code);
17197 if (new_code != UNKNOWN)
17200 tmp = op0, op0 = op1, op1 = tmp;
17206 op0 = force_reg (op_mode, op0);
17208 if (CONSTANT_P (op1))
17210 int tmp = standard_80387_constant_p (op1);
17212 op1 = validize_mem (force_const_mem (op_mode, op1));
17216 op1 = force_reg (op_mode, op1);
17219 op1 = force_reg (op_mode, op1);
17223 /* Try to rearrange the comparison to make it cheaper. */
17224 if (ix86_fp_comparison_cost (code)
17225 > ix86_fp_comparison_cost (swap_condition (code))
17226 && (REG_P (op1) || can_create_pseudo_p ()))
17229 tmp = op0, op0 = op1, op1 = tmp;
17230 code = swap_condition (code);
17232 op0 = force_reg (op_mode, op0);
17240 /* Convert comparison codes we use to represent FP comparison to integer
17241 code that will result in a proper branch. Return UNKNOWN if no such code is available. */
17245 ix86_fp_compare_code_to_integer (enum rtx_code code)
17274 /* Generate insn patterns to do a floating point compare of OPERANDS. */
17277 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
17279 enum machine_mode fpcmp_mode, intcmp_mode;
17282 fpcmp_mode = ix86_fp_compare_mode (code);
17283 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
17285 /* Do fcomi/sahf based test when profitable. */
17286 switch (ix86_fp_comparison_strategy (code))
17288 case IX86_FPCMP_COMI:
17289 intcmp_mode = fpcmp_mode;
17290 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17291 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17296 case IX86_FPCMP_SAHF:
17297 intcmp_mode = fpcmp_mode;
17298 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17299 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
17303 scratch = gen_reg_rtx (HImode);
17304 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
17305 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
17308 case IX86_FPCMP_ARITH:
17309 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
17310 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
17311 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
17313 scratch = gen_reg_rtx (HImode);
17314 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
17316 /* In the unordered case, we have to check C2 for NaN's, which
17317 doesn't happen to work out to anything nice combination-wise.
17318 So do some bit twiddling on the value we've got in AH to come
17319 up with an appropriate set of condition codes. */
17321 intcmp_mode = CCNOmode;
17326 if (code == GT || !TARGET_IEEE_FP)
17328 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17333 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17334 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17335 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
17336 intcmp_mode = CCmode;
17342 if (code == LT && TARGET_IEEE_FP)
17344 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17345 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
17346 intcmp_mode = CCmode;
17351 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
17357 if (code == GE || !TARGET_IEEE_FP)
17359 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
17364 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17365 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
17371 if (code == LE && TARGET_IEEE_FP)
17373 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17374 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
17375 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17376 intcmp_mode = CCmode;
17381 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
17387 if (code == EQ && TARGET_IEEE_FP)
17389 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17390 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
17391 intcmp_mode = CCmode;
17396 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17402 if (code == NE && TARGET_IEEE_FP)
17404 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
17405 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
17411 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
17417 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17421 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
17426 gcc_unreachable ();
17434 /* Return the test that should be put into the flags user, i.e.
17435 the bcc, scc, or cmov instruction. */
17436 return gen_rtx_fmt_ee (code, VOIDmode,
17437 gen_rtx_REG (intcmp_mode, FLAGS_REG),
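/* Background for the 0x45/0x44/0x40/0x05/0x01 masks above
   (illustrative): after "fnstsw %ax" the FPU condition codes sit in
   %ah as C0 = 0x01, C2 = 0x04 and C3 = 0x40, and a compare sets

       op0 > op1  :  C3 C2 C0 = 0 0 0
       op0 < op1  :             0 0 1
       op0 == op1 :             1 0 0
       unordered  :             1 1 1

   so, e.g., "testb $0x45, %ah" is zero exactly for GT, while the
   TARGET_IEEE_FP variants mask with 0x45 first so a NaN result cannot
   alias an ordered one.  */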
17442 ix86_expand_compare (enum rtx_code code, rtx op0, rtx op1)
17446 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
17447 ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
17449 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
17451 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
17452 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
17455 ret = ix86_expand_int_compare (code, op0, op1);
17461 ix86_expand_branch (enum rtx_code code, rtx op0, rtx op1, rtx label)
17463 enum machine_mode mode = GET_MODE (op0);
17475 tmp = ix86_expand_compare (code, op0, op1);
17476 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
17477 gen_rtx_LABEL_REF (VOIDmode, label),
17479 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
17486 /* Expand DImode branch into multiple compare+branch. */
17488 rtx lo[2], hi[2], label2;
17489 enum rtx_code code1, code2, code3;
17490 enum machine_mode submode;
17492 if (CONSTANT_P (op0) && !CONSTANT_P (op1))
17494 tmp = op0, op0 = op1, op1 = tmp;
17495 code = swap_condition (code);
17498 split_double_mode (mode, &op0, 1, lo+0, hi+0);
17499 split_double_mode (mode, &op1, 1, lo+1, hi+1);
17501 submode = mode == DImode ? SImode : DImode;
17503 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
17504 avoid two branches. This costs one extra insn, so disable when
17505 optimizing for size. */
17507 if ((code == EQ || code == NE)
17508 && (!optimize_insn_for_size_p ()
17509 || hi[1] == const0_rtx || lo[1] == const0_rtx))
17514 if (hi[1] != const0_rtx)
17515 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
17516 NULL_RTX, 0, OPTAB_WIDEN);
17519 if (lo[1] != const0_rtx)
17520 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
17521 NULL_RTX, 0, OPTAB_WIDEN);
17523 tmp = expand_binop (submode, ior_optab, xor1, xor0,
17524 NULL_RTX, 0, OPTAB_WIDEN);
17526 ix86_expand_branch (code, tmp, const0_rtx, label);
17530 /* Otherwise, if we are doing less-than or greater-or-equal-than,
17531 op1 is a constant and the low word is zero, then we can just
17532 examine the high word. Similarly for low word -1 and
17533 less-or-equal-than or greater-than. */
17535 if (CONST_INT_P (hi[1]))
17538 case LT: case LTU: case GE: case GEU:
17539 if (lo[1] == const0_rtx)
17541 ix86_expand_branch (code, hi[0], hi[1], label);
17545 case LE: case LEU: case GT: case GTU:
17546 if (lo[1] == constm1_rtx)
17548 ix86_expand_branch (code, hi[0], hi[1], label);
17556 /* Otherwise, we need two or three jumps. */
17558 label2 = gen_label_rtx ();
17561 code2 = swap_condition (code);
17562 code3 = unsigned_condition (code);
17566 case LT: case GT: case LTU: case GTU:
17569 case LE: code1 = LT; code2 = GT; break;
17570 case GE: code1 = GT; code2 = LT; break;
17571 case LEU: code1 = LTU; code2 = GTU; break;
17572 case GEU: code1 = GTU; code2 = LTU; break;
17574 case EQ: code1 = UNKNOWN; code2 = NE; break;
17575 case NE: code2 = UNKNOWN; break;
17578 gcc_unreachable ();
17583 * if (hi(a) < hi(b)) goto true;
17584 * if (hi(a) > hi(b)) goto false;
17585 * if (lo(a) < lo(b)) goto true;
17589 if (code1 != UNKNOWN)
17590 ix86_expand_branch (code1, hi[0], hi[1], label);
17591 if (code2 != UNKNOWN)
17592 ix86_expand_branch (code2, hi[0], hi[1], label2);
17594 ix86_expand_branch (code3, lo[0], lo[1], label);
17596 if (code2 != UNKNOWN)
17597 emit_label (label2);
17602 gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC);
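/* Illustrative standalone sketch (hypothetical helpers, not part of the
   compiler) of the double-word branch strategies above, written for a
   DImode value split into two SImode halves.  */

static int
dw_eq_sketch (unsigned int lo0, unsigned int hi0,
	      unsigned int lo1, unsigned int hi1)
{
  /* (hi0^hi1)|(lo0^lo1) is zero iff both halves match, so one test
     against zero replaces two compare+branch pairs.  */
  return ((hi0 ^ hi1) | (lo0 ^ lo1)) == 0;
}

static int
dw_slt_sketch (unsigned int lo0, int hi0, unsigned int lo1, int hi1)
{
  /* A signed compare of the high words decides the result unless they
     are equal; only then are the low words compared, unsigned.  This
     is the two/three jump scheme emitted above.  */
  if (hi0 < hi1)
    return 1;
  if (hi0 > hi1)
    return 0;
  return lo0 < lo1;
}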
17607 /* Split branch based on floating point condition. */
17609 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
17610 rtx target1, rtx target2, rtx tmp, rtx pushed)
17615 if (target2 != pc_rtx)
17618 code = reverse_condition_maybe_unordered (code);
17623 condition = ix86_expand_fp_compare (code, op1, op2,
17626 /* Remove pushed operand from stack. */
17628 ix86_free_from_memory (GET_MODE (pushed));
17630 i = emit_jump_insn (gen_rtx_SET
17632 gen_rtx_IF_THEN_ELSE (VOIDmode,
17633 condition, target1, target2)));
17634 if (split_branch_probability >= 0)
17635 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
17639 ix86_expand_setcc (rtx dest, enum rtx_code code, rtx op0, rtx op1)
17643 gcc_assert (GET_MODE (dest) == QImode);
17645 ret = ix86_expand_compare (code, op0, op1);
17646 PUT_MODE (ret, QImode);
17647 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
17650 /* Expand comparison setting or clearing carry flag. Return true when
17651 successful and set *POP for the operation. */
17653 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
17655 enum machine_mode mode =
17656 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
17658 /* Do not handle double-mode compares that go through the special path. */
17659 if (mode == (TARGET_64BIT ? TImode : DImode))
17662 if (SCALAR_FLOAT_MODE_P (mode))
17664 rtx compare_op, compare_seq;
17666 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
17668 /* Shortcut: the following common codes never translate
17669 into carry flag compares. */
17670 if (code == EQ || code == NE || code == UNEQ || code == LTGT
17671 || code == ORDERED || code == UNORDERED)
17674 /* These comparisons require the zero flag; swap operands so they won't. */
17675 if ((code == GT || code == UNLE || code == LE || code == UNGT)
17676 && !TARGET_IEEE_FP)
17681 code = swap_condition (code);
17684 /* Try to expand the comparison and verify that we end up with
17685 a carry flag based comparison. This fails to be true only when
17686 we decide to expand the comparison using arithmetic, which is
17687 not a common scenario. */
17689 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
17690 compare_seq = get_insns ();
17693 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
17694 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
17695 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
17697 code = GET_CODE (compare_op);
17699 if (code != LTU && code != GEU)
17702 emit_insn (compare_seq);
17707 if (!INTEGRAL_MODE_P (mode))
17716 /* Convert a==0 into (unsigned)a<1. */
17719 if (op1 != const0_rtx)
17722 code = (code == EQ ? LTU : GEU);
17725 /* Convert a>b into b<a or a>=b+1. */
17728 if (CONST_INT_P (op1))
17730 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
17731 /* Bail out on overflow. We still can swap operands but that
17732 would force loading of the constant into a register. */
17733 if (op1 == const0_rtx
17734 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
17736 code = (code == GTU ? GEU : LTU);
17743 code = (code == GTU ? LTU : GEU);
17747 /* Convert a>=0 into (unsigned)a<0x80000000. */
17750 if (mode == DImode || op1 != const0_rtx)
17752 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
17753 code = (code == LT ? GEU : LTU);
17757 if (mode == DImode || op1 != constm1_rtx)
17759 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
17760 code = (code == LE ? GEU : LTU);
17766 /* Swapping operands may cause the constant to appear as the first operand. */
17767 if (!nonimmediate_operand (op0, VOIDmode))
17769 if (!can_create_pseudo_p ())
17771 op0 = force_reg (mode, op0);
17773 *pop = ix86_expand_compare (code, op0, op1);
17774 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
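/* Standalone sketch (hypothetical, for illustration only) of the
   algebraic rewrites above that turn comparisons into the carry-flag
   forms LTU/GEU.  Each of the equalities below always holds.  */

static int
carry_flag_rewrites_sketch (unsigned int a, unsigned int b)
{
  /* a == 0  becomes  (unsigned) a < 1.  */
  int eq0 = (a < 1) == (a == 0);
  /* a > b  becomes  b < a; with a constant b it can instead become
     a >= b + 1, provided b + 1 does not wrap around to zero.  */
  int gt = (b < a) == (a > b);
  /* (signed) a >= 0  becomes  (unsigned) a < 0x80000000.  */
  int ge0 = (a < 0x80000000u) == ((int) a >= 0);
  return eq0 && gt && ge0;
}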
17779 ix86_expand_int_movcc (rtx operands[])
17781 enum rtx_code code = GET_CODE (operands[1]), compare_code;
17782 rtx compare_seq, compare_op;
17783 enum machine_mode mode = GET_MODE (operands[0]);
17784 bool sign_bit_compare_p = false;
17785 rtx op0 = XEXP (operands[1], 0);
17786 rtx op1 = XEXP (operands[1], 1);
17789 compare_op = ix86_expand_compare (code, op0, op1);
17790 compare_seq = get_insns ();
17793 compare_code = GET_CODE (compare_op);
17795 if ((op1 == const0_rtx && (code == GE || code == LT))
17796 || (op1 == constm1_rtx && (code == GT || code == LE)))
17797 sign_bit_compare_p = true;
17799 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
17800 HImode insns, we'd be swallowed in word prefix ops. */
17802 if ((mode != HImode || TARGET_FAST_PREFIX)
17803 && (mode != (TARGET_64BIT ? TImode : DImode))
17804 && CONST_INT_P (operands[2])
17805 && CONST_INT_P (operands[3]))
17807 rtx out = operands[0];
17808 HOST_WIDE_INT ct = INTVAL (operands[2]);
17809 HOST_WIDE_INT cf = INTVAL (operands[3]);
17810 HOST_WIDE_INT diff;
17813 /* Sign bit compares are better done using shifts than we do by using sbb. */
17815 if (sign_bit_compare_p
17816 || ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
17818 /* Detect overlap between destination and compare sources. */
17821 if (!sign_bit_compare_p)
17824 bool fpcmp = false;
17826 compare_code = GET_CODE (compare_op);
17828 flags = XEXP (compare_op, 0);
17830 if (GET_MODE (flags) == CCFPmode
17831 || GET_MODE (flags) == CCFPUmode)
17835 = ix86_fp_compare_code_to_integer (compare_code);
17838 /* To simplify the rest of the code, restrict to the GEU case. */
17839 if (compare_code == LTU)
17841 HOST_WIDE_INT tmp = ct;
17844 compare_code = reverse_condition (compare_code);
17845 code = reverse_condition (code);
17850 PUT_CODE (compare_op,
17851 reverse_condition_maybe_unordered
17852 (GET_CODE (compare_op)));
17854 PUT_CODE (compare_op,
17855 reverse_condition (GET_CODE (compare_op)));
17859 if (reg_overlap_mentioned_p (out, op0)
17860 || reg_overlap_mentioned_p (out, op1))
17861 tmp = gen_reg_rtx (mode);
17863 if (mode == DImode)
17864 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
17866 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
17867 flags, compare_op));
17871 if (code == GT || code == GE)
17872 code = reverse_condition (code);
17875 HOST_WIDE_INT tmp = ct;
17880 tmp = emit_store_flag (tmp, code, op0, op1, VOIDmode, 0, -1);
17893 tmp = expand_simple_binop (mode, PLUS,
17895 copy_rtx (tmp), 1, OPTAB_DIRECT);
17906 tmp = expand_simple_binop (mode, IOR,
17908 copy_rtx (tmp), 1, OPTAB_DIRECT);
17910 else if (diff == -1 && ct)
17920 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
17922 tmp = expand_simple_binop (mode, PLUS,
17923 copy_rtx (tmp), GEN_INT (cf),
17924 copy_rtx (tmp), 1, OPTAB_DIRECT);
17932 * andl cf - ct, dest
17942 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
17945 tmp = expand_simple_binop (mode, AND,
17947 gen_int_mode (cf - ct, mode),
17948 copy_rtx (tmp), 1, OPTAB_DIRECT);
17950 tmp = expand_simple_binop (mode, PLUS,
17951 copy_rtx (tmp), GEN_INT (ct),
17952 copy_rtx (tmp), 1, OPTAB_DIRECT);
17955 if (!rtx_equal_p (tmp, out))
17956 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
17963 enum machine_mode cmp_mode = GET_MODE (op0);
17966 tmp = ct, ct = cf, cf = tmp;
17969 if (SCALAR_FLOAT_MODE_P (cmp_mode))
17971 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
17973 /* We may be reversing an unordered compare to a normal compare,
17974 which is not valid in general (we may convert a non-trapping
17975 condition into a trapping one); however, on i386 we currently
17976 emit all comparisons unordered. */
17977 compare_code = reverse_condition_maybe_unordered (compare_code);
17978 code = reverse_condition_maybe_unordered (code);
17982 compare_code = reverse_condition (compare_code);
17983 code = reverse_condition (code);
17987 compare_code = UNKNOWN;
17988 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
17989 && CONST_INT_P (op1))
17991 if (op1 == const0_rtx
17992 && (code == LT || code == GE))
17993 compare_code = code;
17994 else if (op1 == constm1_rtx)
17998 else if (code == GT)
18003 /* Optimize dest = (op0 < 0) ? -1 : cf. */
18004 if (compare_code != UNKNOWN
18005 && GET_MODE (op0) == GET_MODE (out)
18006 && (cf == -1 || ct == -1))
18008 /* If lea code below could be used, only optimize
18009 if it results in a 2 insn sequence. */
18011 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
18012 || diff == 3 || diff == 5 || diff == 9)
18013 || (compare_code == LT && ct == -1)
18014 || (compare_code == GE && cf == -1))
18017 * notl op1 (if necessary)
18025 code = reverse_condition (code);
18028 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18030 out = expand_simple_binop (mode, IOR,
18032 out, 1, OPTAB_DIRECT);
18033 if (out != operands[0])
18034 emit_move_insn (operands[0], out);
18041 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
18042 || diff == 3 || diff == 5 || diff == 9)
18043 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
18045 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
18051 * lea cf(dest*(ct-cf)),dest
18055 * This also catches the degenerate setcc-only case.
18061 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
18064 /* On x86_64 the lea instruction operates on Pmode, so we need
18065 to get the arithmetic done in the proper mode to match. */
18067 tmp = copy_rtx (out);
18071 out1 = copy_rtx (out);
18072 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
18076 tmp = gen_rtx_PLUS (mode, tmp, out1);
18082 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
18085 if (!rtx_equal_p (tmp, out))
18088 out = force_operand (tmp, copy_rtx (out));
18090 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
18092 if (!rtx_equal_p (out, operands[0]))
18093 emit_move_insn (operands[0], copy_rtx (out));
18099 * General case: Jumpful:
18100 * xorl dest,dest cmpl op1, op2
18101 * cmpl op1, op2 movl ct, dest
18102 * setcc dest jcc 1f
18103 * decl dest movl cf, dest
18104 * andl (cf-ct),dest 1:
18107 * Size 20. Size 14.
18109 * This is reasonably steep, but branch mispredict costs are
18110 * high on modern cpus, so consider failing only if optimizing for size.
18114 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
18115 && BRANCH_COST (optimize_insn_for_speed_p (),
18120 enum machine_mode cmp_mode = GET_MODE (op0);
18125 if (SCALAR_FLOAT_MODE_P (cmp_mode))
18127 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
18129 /* We may be reversing an unordered compare to a normal compare,
18130 which is not valid in general (we may convert a non-trapping
18131 condition into a trapping one); however, on i386 we currently
18132 emit all comparisons unordered. */
18133 code = reverse_condition_maybe_unordered (code);
18137 code = reverse_condition (code);
18138 if (compare_code != UNKNOWN)
18139 compare_code = reverse_condition (compare_code);
18143 if (compare_code != UNKNOWN)
18145 /* notl op1 (if needed)
18150 For x < 0 (resp. x <= -1) there will be no notl,
18151 so if possible swap the constants to get rid of the
18152 complement.
18153 True/false will be -1/0 while code below (store flag
18154 followed by decrement) is 0/-1, so the constants need
18155 to be exchanged once more. */
18157 if (compare_code == GE || !cf)
18159 code = reverse_condition (code);
18164 HOST_WIDE_INT tmp = cf;
18169 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, -1);
18173 out = emit_store_flag (out, code, op0, op1, VOIDmode, 0, 1);
18175 out = expand_simple_binop (mode, PLUS, copy_rtx (out),
18177 copy_rtx (out), 1, OPTAB_DIRECT);
18180 out = expand_simple_binop (mode, AND, copy_rtx (out),
18181 gen_int_mode (cf - ct, mode),
18182 copy_rtx (out), 1, OPTAB_DIRECT);
18184 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
18185 copy_rtx (out), 1, OPTAB_DIRECT);
18186 if (!rtx_equal_p (out, operands[0]))
18187 emit_move_insn (operands[0], copy_rtx (out));
18193 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
18195 /* Try a few things more with specific constants and a variable. */
18198 rtx var, orig_out, out, tmp;
18200 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
18203 /* If one of the two operands is an interesting constant, load a
18204 constant with the above and mask it in with a logical operation. */
18206 if (CONST_INT_P (operands[2]))
18209 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
18210 operands[3] = constm1_rtx, op = and_optab;
18211 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
18212 operands[3] = const0_rtx, op = ior_optab;
18216 else if (CONST_INT_P (operands[3]))
18219 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
18220 operands[2] = constm1_rtx, op = and_optab;
18221 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
18222 operands[2] = const0_rtx, op = ior_optab;
18229 orig_out = operands[0];
18230 tmp = gen_reg_rtx (mode);
18233 /* Recurse to get the constant loaded. */
18234 if (ix86_expand_int_movcc (operands) == 0)
18237 /* Mask in the interesting variable. */
18238 out = expand_binop (mode, op, var, tmp, orig_out, 0,
18240 if (!rtx_equal_p (out, orig_out))
18241 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
18247 * For comparison with above,
18257 if (! nonimmediate_operand (operands[2], mode))
18258 operands[2] = force_reg (mode, operands[2]);
18259 if (! nonimmediate_operand (operands[3], mode))
18260 operands[3] = force_reg (mode, operands[3]);
18262 if (! register_operand (operands[2], VOIDmode)
18264 || ! register_operand (operands[3], VOIDmode)))
18265 operands[2] = force_reg (mode, operands[2]);
18268 && ! register_operand (operands[3], VOIDmode))
18269 operands[3] = force_reg (mode, operands[3]);
18271 emit_insn (compare_seq);
18272 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18273 gen_rtx_IF_THEN_ELSE (mode,
18274 compare_op, operands[2],
18279 /* Swap, force into registers, or otherwise massage the two operands
18280 to an sse comparison with a mask result. Thus we differ a bit from
18281 ix86_prepare_fp_compare_args which expects to produce a flags result.
18283 The DEST operand exists to help determine whether to commute commutative
18284 operators. The POP0/POP1 operands are updated in place. The new
18285 comparison code is returned, or UNKNOWN if not implementable. */
18287 static enum rtx_code
18288 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
18289 rtx *pop0, rtx *pop1)
18297 /* We have no LTGT as an operator. We could implement it with
18298 NE & ORDERED, but this requires an extra temporary. It's
18299 not clear that it's worth it. */
18306 /* These are supported directly. */
18313 /* For commutative operators, try to canonicalize the destination
18314 operand to be first in the comparison - this helps reload to
18315 avoid extra moves. */
18316 if (!dest || !rtx_equal_p (dest, *pop1))
18324 /* These are not supported directly. Swap the comparison operands
18325 to transform into something that is supported. */
18329 code = swap_condition (code);
18333 gcc_unreachable ();
18339 /* Detect conditional moves that exactly match min/max operational
18340 semantics. Note that this is IEEE safe, as long as we don't
18341 interchange the operands.
18343 Returns FALSE if this conditional move doesn't match a MIN/MAX,
18344 and TRUE if the operation is successful and instructions are emitted. */
18347 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
18348 rtx cmp_op1, rtx if_true, rtx if_false)
18350 enum machine_mode mode;
18356 else if (code == UNGE)
18359 if_true = if_false;
18365 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
18367 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
18372 mode = GET_MODE (dest);
18374 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
18375 but MODE may be a vector mode and thus not appropriate. */
18376 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
18378 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
18381 if_true = force_reg (mode, if_true);
18382 v = gen_rtvec (2, if_true, if_false);
18383 tmp = gen_rtx_UNSPEC (mode, v, u);
18387 code = is_min ? SMIN : SMAX;
18388 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
18391 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
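/* Standalone sketch (hypothetical) of why the operands above must not
   be interchanged: the SSE min/max patterns behave like the expression
   below, which is not symmetric for NaNs or for -0.0/+0.0.  */

static double
sse_min_sketch (double a, double b)
{
  /* If A is a NaN the comparison is false and B is returned; swapping
     the operands would return the NaN instead.  */
  return a < b ? a : b;
}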
18395 /* Expand an sse vector comparison. Return the register with the result. */
18398 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
18399 rtx op_true, rtx op_false)
18401 enum machine_mode mode = GET_MODE (dest);
18404 cmp_op0 = force_reg (mode, cmp_op0);
18405 if (!nonimmediate_operand (cmp_op1, mode))
18406 cmp_op1 = force_reg (mode, cmp_op1);
18409 || reg_overlap_mentioned_p (dest, op_true)
18410 || reg_overlap_mentioned_p (dest, op_false))
18411 dest = gen_reg_rtx (mode);
18413 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
18414 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18419 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
18420 operations. This is used for both scalar and vector conditional moves. */
18423 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
18425 enum machine_mode mode = GET_MODE (dest);
18428 if (op_false == CONST0_RTX (mode))
18430 op_true = force_reg (mode, op_true);
18431 x = gen_rtx_AND (mode, cmp, op_true);
18432 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18434 else if (op_true == CONST0_RTX (mode))
18436 op_false = force_reg (mode, op_false);
18437 x = gen_rtx_NOT (mode, cmp);
18438 x = gen_rtx_AND (mode, x, op_false);
18439 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
18441 else if (TARGET_XOP)
18443 rtx pcmov = gen_rtx_SET (mode, dest,
18444 gen_rtx_IF_THEN_ELSE (mode, cmp,
18451 op_true = force_reg (mode, op_true);
18452 op_false = force_reg (mode, op_false);
18454 t2 = gen_reg_rtx (mode);
18456 t3 = gen_reg_rtx (mode);
18460 x = gen_rtx_AND (mode, op_true, cmp);
18461 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
18463 x = gen_rtx_NOT (mode, cmp);
18464 x = gen_rtx_AND (mode, x, op_false);
18465 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
18467 x = gen_rtx_IOR (mode, t3, t2);
18468 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
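/* Standalone sketch (hypothetical) of the AND/NOT-AND/IOR blend emitted
   above; CMP is assumed to be all-ones where the condition holds and
   all-zeros elsewhere, as produced by the SSE compare instructions.  */

static unsigned int
sse_blend_sketch (unsigned int cmp, unsigned int op_true,
		  unsigned int op_false)
{
  return (op_true & cmp) | (op_false & ~cmp);
}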
18472 /* Expand a floating-point conditional move. Return true if successful. */
18475 ix86_expand_fp_movcc (rtx operands[])
18477 enum machine_mode mode = GET_MODE (operands[0]);
18478 enum rtx_code code = GET_CODE (operands[1]);
18479 rtx tmp, compare_op;
18480 rtx op0 = XEXP (operands[1], 0);
18481 rtx op1 = XEXP (operands[1], 1);
18483 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
18485 enum machine_mode cmode;
18487 /* Since we have no cmove for sse registers, don't force bad register
18488 allocation just to gain access to it. Deny movcc when the
18489 comparison mode doesn't match the move mode. */
18490 cmode = GET_MODE (op0);
18491 if (cmode == VOIDmode)
18492 cmode = GET_MODE (op1);
18496 code = ix86_prepare_sse_fp_compare_args (operands[0], code, &op0, &op1);
18497 if (code == UNKNOWN)
18500 if (ix86_expand_sse_fp_minmax (operands[0], code, op0, op1,
18501 operands[2], operands[3]))
18504 tmp = ix86_expand_sse_cmp (operands[0], code, op0, op1,
18505 operands[2], operands[3]);
18506 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
18510 /* The floating point conditional move instructions don't directly
18511 support conditions resulting from a signed integer comparison. */
18513 compare_op = ix86_expand_compare (code, op0, op1);
18514 if (!fcmov_comparison_operator (compare_op, VOIDmode))
18516 tmp = gen_reg_rtx (QImode);
18517 ix86_expand_setcc (tmp, code, op0, op1);
18519 compare_op = ix86_expand_compare (NE, tmp, const0_rtx);
18522 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
18523 gen_rtx_IF_THEN_ELSE (mode, compare_op,
18524 operands[2], operands[3])));
18529 /* Expand a floating-point vector conditional move; a vcond operation
18530 rather than a movcc operation. */
18533 ix86_expand_fp_vcond (rtx operands[])
18535 enum rtx_code code = GET_CODE (operands[3]);
18538 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
18539 &operands[4], &operands[5]);
18540 if (code == UNKNOWN)
18543 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
18544 operands[5], operands[1], operands[2]))
18547 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
18548 operands[1], operands[2]);
18549 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
18553 /* Expand a signed/unsigned integral vector conditional move. */
18556 ix86_expand_int_vcond (rtx operands[])
18558 enum machine_mode mode = GET_MODE (operands[0]);
18559 enum rtx_code code = GET_CODE (operands[3]);
18560 bool negate = false;
18563 cop0 = operands[4];
18564 cop1 = operands[5];
18566 /* XOP supports all of the comparisons on all vector int types. */
18569 /* Canonicalize the comparison to EQ, GT, GTU. */
18580 code = reverse_condition (code);
18586 code = reverse_condition (code);
18592 code = swap_condition (code);
18593 x = cop0, cop0 = cop1, cop1 = x;
18597 gcc_unreachable ();
18600 /* Only SSE4.1/SSE4.2 supports V2DImode. */
18601 if (mode == V2DImode)
18606 /* SSE4.1 supports EQ. */
18607 if (!TARGET_SSE4_1)
18613 /* SSE4.2 supports GT/GTU. */
18614 if (!TARGET_SSE4_2)
18619 gcc_unreachable ();
18623 /* Unsigned parallel compare is not supported by the hardware.
18624 Play some tricks to turn this into a signed comparison against zero. */
18628 cop0 = force_reg (mode, cop0);
18636 rtx (*gen_sub3) (rtx, rtx, rtx);
18638 /* Subtract (-(INT MAX) - 1) from both operands to make
18639 them signed. */
18640 mask = ix86_build_signbit_mask (mode, true, false);
18641 gen_sub3 = (mode == V4SImode
18642 ? gen_subv4si3 : gen_subv2di3);
18643 t1 = gen_reg_rtx (mode);
18644 emit_insn (gen_sub3 (t1, cop0, mask));
18646 t2 = gen_reg_rtx (mode);
18647 emit_insn (gen_sub3 (t2, cop1, mask));
18657 /* Perform a parallel unsigned saturating subtraction. */
18658 x = gen_reg_rtx (mode);
18659 emit_insn (gen_rtx_SET (VOIDmode, x,
18660 gen_rtx_US_MINUS (mode, cop0, cop1)));
18663 cop1 = CONST0_RTX (mode);
18669 gcc_unreachable ();
18674 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
18675 operands[1+negate], operands[2-negate]);
18677 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
18678 operands[2-negate]);
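/* Standalone sketch (hypothetical) of the unsigned-compare tricks used
   above, written for a single 32-bit lane.  */

static int
vec_gtu_via_bias_sketch (unsigned int a, unsigned int b)
{
  /* Subtracting (-(INT MAX) - 1), i.e. flipping the sign bit, maps
     unsigned order onto signed order, which the hardware supports.  */
  int sa = (int) (a - 0x80000000u);
  int sb = (int) (b - 0x80000000u);
  return sa > sb;		/* equals a > b as unsigned  */
}

static int
vec_gtu_via_usat_sketch (unsigned int a, unsigned int b)
{
  /* With unsigned saturating subtraction, a - b is nonzero exactly
     when a > b, so GTU reduces to a compare against zero.  */
  unsigned int d = a > b ? a - b : 0;	/* us_minus  */
  return d != 0;
}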
18682 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
18683 true if we should do zero extension, else sign extension. HIGH_P is
18684 true if we want the N/2 high elements, else the low elements. */
18687 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
18689 enum machine_mode imode = GET_MODE (operands[1]);
18694 rtx (*unpack)(rtx, rtx);
18700 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
18702 unpack = gen_sse4_1_sign_extendv8qiv8hi2;
18706 unpack = gen_sse4_1_zero_extendv4hiv4si2;
18708 unpack = gen_sse4_1_sign_extendv4hiv4si2;
18712 unpack = gen_sse4_1_zero_extendv2siv2di2;
18714 unpack = gen_sse4_1_sign_extendv2siv2di2;
18717 gcc_unreachable ();
18722 /* Shift higher 8 bytes to lower 8 bytes. */
18723 tmp = gen_reg_rtx (imode);
18724 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, tmp),
18725 gen_lowpart (V1TImode, operands[1]),
18731 emit_insn (unpack (operands[0], tmp));
18735 rtx (*unpack)(rtx, rtx, rtx);
18741 unpack = gen_vec_interleave_highv16qi;
18743 unpack = gen_vec_interleave_lowv16qi;
18747 unpack = gen_vec_interleave_highv8hi;
18749 unpack = gen_vec_interleave_lowv8hi;
18753 unpack = gen_vec_interleave_highv4si;
18755 unpack = gen_vec_interleave_lowv4si;
18758 gcc_unreachable ();
18761 dest = gen_lowpart (imode, operands[0]);
18764 tmp = force_reg (imode, CONST0_RTX (imode));
18766 tmp = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
18767 operands[1], pc_rtx, pc_rtx);
18769 emit_insn (unpack (dest, operands[1], tmp));
18773 /* Expand conditional increment or decrement using adc/sbb instructions.
18774 The default case using setcc followed by the conditional move can be
18775 done by generic code. */
18777 ix86_expand_int_addcc (rtx operands[])
18779 enum rtx_code code = GET_CODE (operands[1]);
18781 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
18783 rtx val = const0_rtx;
18784 bool fpcmp = false;
18785 enum machine_mode mode;
18786 rtx op0 = XEXP (operands[1], 0);
18787 rtx op1 = XEXP (operands[1], 1);
18789 if (operands[3] != const1_rtx
18790 && operands[3] != constm1_rtx)
18792 if (!ix86_expand_carry_flag_compare (code, op0, op1, &compare_op))
18794 code = GET_CODE (compare_op);
18796 flags = XEXP (compare_op, 0);
18798 if (GET_MODE (flags) == CCFPmode
18799 || GET_MODE (flags) == CCFPUmode)
18802 code = ix86_fp_compare_code_to_integer (code);
18809 PUT_CODE (compare_op,
18810 reverse_condition_maybe_unordered
18811 (GET_CODE (compare_op)));
18813 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
18816 mode = GET_MODE (operands[0]);
18818 /* Construct either adc or sbb insn. */
18819 if ((code == LTU) == (operands[3] == constm1_rtx))
18824 insn = gen_subqi3_carry;
18827 insn = gen_subhi3_carry;
18830 insn = gen_subsi3_carry;
18833 insn = gen_subdi3_carry;
18836 gcc_unreachable ();
18844 insn = gen_addqi3_carry;
18847 insn = gen_addhi3_carry;
18850 insn = gen_addsi3_carry;
18853 insn = gen_adddi3_carry;
18856 gcc_unreachable ();
18859 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
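/* Standalone sketch (hypothetical) of the conditional increment /
   decrement expanded above: the compare leaves the condition in the
   carry flag and a single adc/sbb folds it into the add, branch-free.  */

static unsigned int
addcc_sketch (unsigned int x, unsigned int a, unsigned int b)
{
  /* x + (a < b) maps to cmp+adc; x - (a < b) maps to cmp+sbb.  */
  return x + (a < b ? 1 : 0);
}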
18865 /* Split operands 0 and 1 into half-mode parts. Similar to split_double_mode,
18866 but works for floating point parameters and non-offsettable memories.
18867 For pushes, it returns just stack offsets; the values will be saved
18868 in the right order. Maximally four parts are generated. */
18871 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
18876 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
18878 size = (GET_MODE_SIZE (mode) + 4) / 8;
18880 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
18881 gcc_assert (size >= 2 && size <= 4);
18883 /* Optimize constant pool reference to immediates. This is used by fp
18884 moves, which force all constants to memory to allow combining. */
18885 if (MEM_P (operand) && MEM_READONLY_P (operand))
18887 rtx tmp = maybe_get_pool_constant (operand);
18892 if (MEM_P (operand) && !offsettable_memref_p (operand))
18894 /* The only non-offsettable memories we handle are pushes. */
18895 int ok = push_operand (operand, VOIDmode);
18899 operand = copy_rtx (operand);
18900 PUT_MODE (operand, Pmode);
18901 parts[0] = parts[1] = parts[2] = parts[3] = operand;
18905 if (GET_CODE (operand) == CONST_VECTOR)
18907 enum machine_mode imode = int_mode_for_mode (mode);
18908 /* Caution: if we looked through a constant pool memory above,
18909 the operand may actually have a different mode now. That's
18910 ok, since we want to pun this all the way back to an integer. */
18911 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
18912 gcc_assert (operand != NULL);
18918 if (mode == DImode)
18919 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
18924 if (REG_P (operand))
18926 gcc_assert (reload_completed);
18927 for (i = 0; i < size; i++)
18928 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
18930 else if (offsettable_memref_p (operand))
18932 operand = adjust_address (operand, SImode, 0);
18933 parts[0] = operand;
18934 for (i = 1; i < size; i++)
18935 parts[i] = adjust_address (operand, SImode, 4 * i);
18937 else if (GET_CODE (operand) == CONST_DOUBLE)
18942 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
18946 real_to_target (l, &r, mode);
18947 parts[3] = gen_int_mode (l[3], SImode);
18948 parts[2] = gen_int_mode (l[2], SImode);
18951 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
18952 parts[2] = gen_int_mode (l[2], SImode);
18955 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
18958 gcc_unreachable ();
18960 parts[1] = gen_int_mode (l[1], SImode);
18961 parts[0] = gen_int_mode (l[0], SImode);
18964 gcc_unreachable ();
18969 if (mode == TImode)
18970 split_double_mode (mode, &operand, 1, &parts[0], &parts[1]);
18971 if (mode == XFmode || mode == TFmode)
18973 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
18974 if (REG_P (operand))
18976 gcc_assert (reload_completed);
18977 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
18978 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
18980 else if (offsettable_memref_p (operand))
18982 operand = adjust_address (operand, DImode, 0);
18983 parts[0] = operand;
18984 parts[1] = adjust_address (operand, upper_mode, 8);
18986 else if (GET_CODE (operand) == CONST_DOUBLE)
18991 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
18992 real_to_target (l, &r, mode);
18994 /* Do not use shift by 32 to avoid warning on 32bit systems. */
18995 if (HOST_BITS_PER_WIDE_INT >= 64)
18998 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
18999 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
19002 parts[0] = immed_double_const (l[0], l[1], DImode);
19004 if (upper_mode == SImode)
19005 parts[1] = gen_int_mode (l[2], SImode);
19006 else if (HOST_BITS_PER_WIDE_INT >= 64)
19009 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
19010 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
19013 parts[1] = immed_double_const (l[2], l[3], DImode);
19016 gcc_unreachable ();
19023 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
19024 Return false when normal moves are needed; true when all required
19025 insns have been emitted. Operands 2-4 contain the input values
19026 in the correct order; operands 5-7 contain the output values. */
19029 ix86_split_long_move (rtx operands[])
19034 int collisions = 0;
19035 enum machine_mode mode = GET_MODE (operands[0]);
19036 bool collisionparts[4];
19038 /* The DFmode expanders may ask us to move a double.
19039 For a 64bit target this is a single move. By hiding the fact
19040 here we simplify i386.md splitters. */
19041 if (TARGET_64BIT && GET_MODE_SIZE (GET_MODE (operands[0])) == 8)
19043 /* Optimize constant pool reference to immediates. This is used by
19044 fp moves, which force all constants to memory to allow combining. */
19046 if (MEM_P (operands[1])
19047 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
19048 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
19049 operands[1] = get_pool_constant (XEXP (operands[1], 0));
19050 if (push_operand (operands[0], VOIDmode))
19052 operands[0] = copy_rtx (operands[0]);
19053 PUT_MODE (operands[0], Pmode);
19056 operands[0] = gen_lowpart (DImode, operands[0]);
19057 operands[1] = gen_lowpart (DImode, operands[1]);
19058 emit_move_insn (operands[0], operands[1]);
19062 /* The only non-offsettable memory we handle is push. */
19063 if (push_operand (operands[0], VOIDmode))
19066 gcc_assert (!MEM_P (operands[0])
19067 || offsettable_memref_p (operands[0]));
19069 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
19070 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
19072 /* When emitting a push, take care of source operands on the stack. */
19073 if (push && MEM_P (operands[1])
19074 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
19076 rtx src_base = XEXP (part[1][nparts - 1], 0);
19078 /* Compensate for the stack decrement by 4. */
19079 if (!TARGET_64BIT && nparts == 3
19080 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
19081 src_base = plus_constant (src_base, 4);
19083 /* src_base refers to the stack pointer and is
19084 automatically decreased by emitted push. */
19085 for (i = 0; i < nparts; i++)
19086 part[1][i] = change_address (part[1][i],
19087 GET_MODE (part[1][i]), src_base);
19090 /* We need to do the copy in the right order in case an address register
19091 of the source overlaps the destination. */
19092 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
19096 for (i = 0; i < nparts; i++)
19099 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
19100 if (collisionparts[i])
19104 /* Collision in the middle part can be handled by reordering. */
19105 if (collisions == 1 && nparts == 3 && collisionparts [1])
19107 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
19108 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
19110 else if (collisions == 1
19112 && (collisionparts [1] || collisionparts [2]))
19114 if (collisionparts [1])
19116 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
19117 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
19121 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
19122 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
19126 /* If there are more collisions, we can't handle it by reordering.
19127 Do an lea to the last part and use only one colliding move. */
19128 else if (collisions > 1)
19134 base = part[0][nparts - 1];
19136 /* Handle the case when the last part isn't valid for lea.
19137 Happens in 64-bit mode storing the 12-byte XFmode. */
19138 if (GET_MODE (base) != Pmode)
19139 base = gen_rtx_REG (Pmode, REGNO (base));
19141 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
19142 part[1][0] = replace_equiv_address (part[1][0], base);
19143 for (i = 1; i < nparts; i++)
19145 tmp = plus_constant (base, UNITS_PER_WORD * i);
19146 part[1][i] = replace_equiv_address (part[1][i], tmp);
19157 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
19158 emit_insn (gen_addsi3 (stack_pointer_rtx,
19159 stack_pointer_rtx, GEN_INT (-4)));
19160 emit_move_insn (part[0][2], part[1][2]);
19162 else if (nparts == 4)
19164 emit_move_insn (part[0][3], part[1][3]);
19165 emit_move_insn (part[0][2], part[1][2]);
19170 /* In 64bit mode we don't have a 32bit push available. In case this
19171 is a register, it is OK - we will just use the larger counterpart.
19172 We also retype memory - these come from an attempt to avoid the REX
19173 prefix on moving the second half of a TFmode value. */
19174 if (GET_MODE (part[1][1]) == SImode)
19176 switch (GET_CODE (part[1][1]))
19179 part[1][1] = adjust_address (part[1][1], DImode, 0);
19183 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
19187 gcc_unreachable ();
19190 if (GET_MODE (part[1][0]) == SImode)
19191 part[1][0] = part[1][1];
19194 emit_move_insn (part[0][1], part[1][1]);
19195 emit_move_insn (part[0][0], part[1][0]);
19199 /* Choose the correct order so as not to overwrite the source before it is copied. */
19200 if ((REG_P (part[0][0])
19201 && REG_P (part[1][1])
19202 && (REGNO (part[0][0]) == REGNO (part[1][1])
19204 && REGNO (part[0][0]) == REGNO (part[1][2]))
19206 && REGNO (part[0][0]) == REGNO (part[1][3]))))
19208 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
19210 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
19212 operands[2 + i] = part[0][j];
19213 operands[6 + i] = part[1][j];
19218 for (i = 0; i < nparts; i++)
19220 operands[2 + i] = part[0][i];
19221 operands[6 + i] = part[1][i];
19225 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
19226 if (optimize_insn_for_size_p ())
19228 for (j = 0; j < nparts - 1; j++)
19229 if (CONST_INT_P (operands[6 + j])
19230 && operands[6 + j] != const0_rtx
19231 && REG_P (operands[2 + j]))
19232 for (i = j; i < nparts - 1; i++)
19233 if (CONST_INT_P (operands[7 + i])
19234 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
19235 operands[7 + i] = operands[2 + j];
19238 for (i = 0; i < nparts; i++)
19239 emit_move_insn (operands[2 + i], operands[6 + i]);
19244 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
19245 left shift by a constant, either using a single shift or
19246 a sequence of add instructions. */
19249 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
19251 rtx (*insn)(rtx, rtx, rtx);
19254 || (count * ix86_cost->add <= ix86_cost->shift_const
19255 && !optimize_insn_for_size_p ()))
19257 insn = mode == DImode ? gen_addsi3 : gen_adddi3;
19258 while (count-- > 0)
19259 emit_insn (insn (operand, operand, operand));
19263 insn = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
19264 emit_insn (insn (operand, operand, GEN_INT (count)));
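/* Standalone sketch (hypothetical) of the add-based shift above: each
   `x += x' doubles the value, so COUNT additions equal `x << COUNT';
   the expander picks whichever the cost model says is cheaper.  */

static unsigned int
ashl_by_adds_sketch (unsigned int x, int count)
{
  while (count-- > 0)
    x += x;
  return x;
}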
19269 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
19271 rtx (*gen_ashl3)(rtx, rtx, rtx);
19272 rtx (*gen_shld)(rtx, rtx, rtx);
19273 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19275 rtx low[2], high[2];
19278 if (CONST_INT_P (operands[2]))
19280 split_double_mode (mode, operands, 2, low, high);
19281 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19283 if (count >= half_width)
19285 emit_move_insn (high[0], low[1]);
19286 emit_move_insn (low[0], const0_rtx);
19288 if (count > half_width)
19289 ix86_expand_ashl_const (high[0], count - half_width, mode);
19293 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
19295 if (!rtx_equal_p (operands[0], operands[1]))
19296 emit_move_insn (operands[0], operands[1]);
19298 emit_insn (gen_shld (high[0], low[0], GEN_INT (count)));
19299 ix86_expand_ashl_const (low[0], count, mode);
19304 split_double_mode (mode, operands, 1, low, high);
19306 gen_ashl3 = mode == DImode ? gen_ashlsi3 : gen_ashldi3;
19308 if (operands[1] == const1_rtx)
19310 /* Assuming we've chosen QImode-capable registers, then 1 << N
19311 can be done with two 32/64-bit shifts, no branches, no cmoves. */
19312 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
19314 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
19316 ix86_expand_clear (low[0]);
19317 ix86_expand_clear (high[0]);
19318 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (half_width)));
19320 d = gen_lowpart (QImode, low[0]);
19321 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
19322 s = gen_rtx_EQ (QImode, flags, const0_rtx);
19323 emit_insn (gen_rtx_SET (VOIDmode, d, s));
19325 d = gen_lowpart (QImode, high[0]);
19326 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
19327 s = gen_rtx_NE (QImode, flags, const0_rtx);
19328 emit_insn (gen_rtx_SET (VOIDmode, d, s));
19331 /* Otherwise, we can get the same results by manually performing
19332 a bit extract operation on bit 5/6, and then performing the two
19333 shifts. The two methods of getting 0/1 into low/high are exactly
19334 the same size. Avoiding the shift in the bit extract case helps
19335 pentium4 a bit; no one else seems to care much either way. */
19338 enum machine_mode half_mode;
19339 rtx (*gen_lshr3)(rtx, rtx, rtx);
19340 rtx (*gen_and3)(rtx, rtx, rtx);
19341 rtx (*gen_xor3)(rtx, rtx, rtx);
19342 HOST_WIDE_INT bits;
19345 if (mode == DImode)
19347 half_mode = SImode;
19348 gen_lshr3 = gen_lshrsi3;
19349 gen_and3 = gen_andsi3;
19350 gen_xor3 = gen_xorsi3;
19355 half_mode = DImode;
19356 gen_lshr3 = gen_lshrdi3;
19357 gen_and3 = gen_anddi3;
19358 gen_xor3 = gen_xordi3;
19362 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
19363 x = gen_rtx_ZERO_EXTEND (half_mode, operands[2]);
19365 x = gen_lowpart (half_mode, operands[2]);
19366 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
19368 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (bits)));
19369 emit_insn (gen_and3 (high[0], high[0], const1_rtx));
19370 emit_move_insn (low[0], high[0]);
19371 emit_insn (gen_xor3 (low[0], low[0], const1_rtx));
19374 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
19375 emit_insn (gen_ashl3 (high[0], high[0], operands[2]));
19379 if (operands[1] == constm1_rtx)
19381 /* For -1 << N, we can avoid the shld instruction, because we
19382 know that we're shifting 0...31/63 ones into a -1. */
19383 emit_move_insn (low[0], constm1_rtx);
19384 if (optimize_insn_for_size_p ())
19385 emit_move_insn (high[0], low[0]);
19387 emit_move_insn (high[0], constm1_rtx);
19391 gen_shld = mode == DImode ? gen_x86_shld : gen_x86_64_shld;
19393 if (!rtx_equal_p (operands[0], operands[1]))
19394 emit_move_insn (operands[0], operands[1]);
19396 split_double_mode (mode, operands, 1, low, high);
19397 emit_insn (gen_shld (high[0], low[0], operands[2]));
19400 emit_insn (gen_ashl3 (low[0], low[0], operands[2]));
19402 if (TARGET_CMOVE && scratch)
19404 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19405 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19407 ix86_expand_clear (scratch);
19408 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
19412 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
19413 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
19415 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
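/* Standalone sketch (hypothetical) of the double-word left shift split
   above, for a 64-bit value held in two 32-bit halves; x86 variable
   shifts mask the count, which the adjust step compensates for.  */

static void
dw_ashl_sketch (unsigned int *lo, unsigned int *hi, unsigned int n)
{
  unsigned int c = n & 63;
  if (c == 0)
    return;
  if (c < 32)
    {
      /* This is what shld does: high also receives the bits shifted
	 out of low.  */
      *hi = (*hi << c) | (*lo >> (32 - c));
      *lo <<= c;
    }
  else
    {
      /* Count of half width or more: move low into high and clear low
	 (the gen_x86_shift_adj_* step).  */
      *hi = *lo << (c - 32);
      *lo = 0;
    }
}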
19420 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
19422 rtx (*gen_ashr3)(rtx, rtx, rtx)
19423 = mode == DImode ? gen_ashrsi3 : gen_ashrdi3;
19424 rtx (*gen_shrd)(rtx, rtx, rtx);
19425 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19427 rtx low[2], high[2];
19430 if (CONST_INT_P (operands[2]))
19432 split_double_mode (mode, operands, 2, low, high);
19433 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19435 if (count == GET_MODE_BITSIZE (mode) - 1)
19437 emit_move_insn (high[0], high[1]);
19438 emit_insn (gen_ashr3 (high[0], high[0],
19439 GEN_INT (half_width - 1)));
19440 emit_move_insn (low[0], high[0]);
19443 else if (count >= half_width)
19445 emit_move_insn (low[0], high[1]);
19446 emit_move_insn (high[0], low[0]);
19447 emit_insn (gen_ashr3 (high[0], high[0],
19448 GEN_INT (half_width - 1)));
19450 if (count > half_width)
19451 emit_insn (gen_ashr3 (low[0], low[0],
19452 GEN_INT (count - half_width)));
19456 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19458 if (!rtx_equal_p (operands[0], operands[1]))
19459 emit_move_insn (operands[0], operands[1]);
19461 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
19462 emit_insn (gen_ashr3 (high[0], high[0], GEN_INT (count)));
19467 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19469 if (!rtx_equal_p (operands[0], operands[1]))
19470 emit_move_insn (operands[0], operands[1]);
19472 split_double_mode (mode, operands, 1, low, high);
19474 emit_insn (gen_shrd (low[0], high[0], operands[2]));
19475 emit_insn (gen_ashr3 (high[0], high[0], operands[2]));
19477 if (TARGET_CMOVE && scratch)
19479 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19480 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19482 emit_move_insn (scratch, high[0]);
19483 emit_insn (gen_ashr3 (scratch, scratch,
19484 GEN_INT (half_width - 1)));
19485 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
19490 rtx (*gen_x86_shift_adj_3)(rtx, rtx, rtx)
19491 = mode == DImode ? gen_x86_shiftsi_adj_3 : gen_x86_shiftdi_adj_3;
19493 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
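/* Standalone sketch (hypothetical) of the special case above: an
   arithmetic shift right by WIDTH-1 just broadcasts the sign bit, so
   both result halves can be filled from `high >> 31'.  */

static void
dw_ashr_by_63_sketch (unsigned int *lo, int *hi)
{
  *hi >>= 31;			/* all zeros or all ones  */
  *lo = (unsigned int) *hi;
}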
19499 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
19501 rtx (*gen_lshr3)(rtx, rtx, rtx)
19502 = mode == DImode ? gen_lshrsi3 : gen_lshrdi3;
19503 rtx (*gen_shrd)(rtx, rtx, rtx);
19504 int half_width = GET_MODE_BITSIZE (mode) >> 1;
19506 rtx low[2], high[2];
19509 if (CONST_INT_P (operands[2]))
19511 split_double_mode (mode, operands, 2, low, high);
19512 count = INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1);
19514 if (count >= half_width)
19516 emit_move_insn (low[0], high[1]);
19517 ix86_expand_clear (high[0]);
19519 if (count > half_width)
19520 emit_insn (gen_lshr3 (low[0], low[0],
19521 GEN_INT (count - half_width)));
19525 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19527 if (!rtx_equal_p (operands[0], operands[1]))
19528 emit_move_insn (operands[0], operands[1]);
19530 emit_insn (gen_shrd (low[0], high[0], GEN_INT (count)));
19531 emit_insn (gen_lshr3 (high[0], high[0], GEN_INT (count)));
19536 gen_shrd = mode == DImode ? gen_x86_shrd : gen_x86_64_shrd;
19538 if (!rtx_equal_p (operands[0], operands[1]))
19539 emit_move_insn (operands[0], operands[1]);
19541 split_double_mode (mode, operands, 1, low, high);
19543 emit_insn (gen_shrd (low[0], high[0], operands[2]));
19544 emit_insn (gen_lshr3 (high[0], high[0], operands[2]));
19546 if (TARGET_CMOVE && scratch)
19548 rtx (*gen_x86_shift_adj_1)(rtx, rtx, rtx, rtx)
19549 = mode == DImode ? gen_x86_shiftsi_adj_1 : gen_x86_shiftdi_adj_1;
19551 ix86_expand_clear (scratch);
19552 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
19557 rtx (*gen_x86_shift_adj_2)(rtx, rtx, rtx)
19558 = mode == DImode ? gen_x86_shiftsi_adj_2 : gen_x86_shiftdi_adj_2;
19560 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
19565 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
19567 predict_jump (int prob)
19569 rtx insn = get_last_insn ();
19570 gcc_assert (JUMP_P (insn));
19571 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
19574 /* Helper function for the string operations below. Test VARIABLE whether
19575 it is aligned to VALUE bytes. If true, jump to the label. */
19577 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
19579 rtx label = gen_label_rtx ();
19580 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
19581 if (GET_MODE (variable) == DImode)
19582 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
19584 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
19585 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
19588 predict_jump (REG_BR_PROB_BASE * 50 / 100);
19590 predict_jump (REG_BR_PROB_BASE * 90 / 100);
19594 /* Adjust COUNTREG by VALUE. */
19596 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
19598 rtx (*gen_add)(rtx, rtx, rtx)
19599 = GET_MODE (countreg) == DImode ? gen_adddi3 : gen_addsi3;
19601 emit_insn (gen_add (countreg, countreg, GEN_INT (-value)));
19604 /* Zero-extend a possibly SImode EXP to a Pmode register. */
19606 ix86_zero_extend_to_Pmode (rtx exp)
19609 if (GET_MODE (exp) == VOIDmode)
19610 return force_reg (Pmode, exp);
19611 if (GET_MODE (exp) == Pmode)
19612 return copy_to_mode_reg (Pmode, exp);
19613 r = gen_reg_rtx (Pmode);
19614 emit_insn (gen_zero_extendsidi2 (r, exp));
19618 /* Divide COUNTREG by SCALE. */
19620 scale_counter (rtx countreg, int scale)
19626 if (CONST_INT_P (countreg))
19627 return GEN_INT (INTVAL (countreg) / scale);
19628 gcc_assert (REG_P (countreg));
19630 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
19631 GEN_INT (exact_log2 (scale)),
19632 NULL, 1, OPTAB_DIRECT);
19636 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
19637 DImode for constant loop counts. */
19639 static enum machine_mode
19640 counter_mode (rtx count_exp)
19642 if (GET_MODE (count_exp) != VOIDmode)
19643 return GET_MODE (count_exp);
19644 if (!CONST_INT_P (count_exp))
19646 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
19651 /* When SRCPTR is non-NULL, output a simple loop to move memory pointed
19652 to by SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL times;
19653 the overall size is COUNT, specified in bytes. When SRCPTR is NULL,
19654 output the equivalent loop to set memory by VALUE (supposed to be in MODE).
19656 The size is rounded down to a whole number of chunks moved at once.
19657 SRCMEM and DESTMEM provide MEM rtxes to feed proper aliasing info. */
19661 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
19662 rtx destptr, rtx srcptr, rtx value,
19663 rtx count, enum machine_mode mode, int unroll,
19666 rtx out_label, top_label, iter, tmp;
19667 enum machine_mode iter_mode = counter_mode (count);
19668 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
19669 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
19675 top_label = gen_label_rtx ();
19676 out_label = gen_label_rtx ();
19677 iter = gen_reg_rtx (iter_mode);
19679 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
19680 NULL, 1, OPTAB_DIRECT);
19681 /* Those two should combine. */
19682 if (piece_size == const1_rtx)
19684 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
19686 predict_jump (REG_BR_PROB_BASE * 10 / 100);
19688 emit_move_insn (iter, const0_rtx);
19690 emit_label (top_label);
19692 tmp = convert_modes (Pmode, iter_mode, iter, true);
19693 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
19694 destmem = change_address (destmem, mode, x_addr);
19698 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
19699 srcmem = change_address (srcmem, mode, y_addr);
19701 /* When unrolling for chips that reorder memory reads and writes,
19702 we can save registers by using a single temporary.
19703 Also, using 4 temporaries is overkill in 32bit mode. */
19704 if (!TARGET_64BIT && 0)
19706 for (i = 0; i < unroll; i++)
19711 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
19713 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
19715 emit_move_insn (destmem, srcmem);
19721 gcc_assert (unroll <= 4);
19722 for (i = 0; i < unroll; i++)
19724 tmpreg[i] = gen_reg_rtx (mode);
19728 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
19730 emit_move_insn (tmpreg[i], srcmem);
19732 for (i = 0; i < unroll; i++)
19737 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
19739 emit_move_insn (destmem, tmpreg[i]);
19744 for (i = 0; i < unroll; i++)
19748 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
19749 emit_move_insn (destmem, value);
19752 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
19753 true, OPTAB_LIB_WIDEN);
19755 emit_move_insn (iter, tmp);
19757 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
19759 if (expected_size != -1)
19761 expected_size /= GET_MODE_SIZE (mode) * unroll;
19762 if (expected_size == 0)
19764 else if (expected_size > REG_BR_PROB_BASE)
19765 predict_jump (REG_BR_PROB_BASE - 1);
19767 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
19770 predict_jump (REG_BR_PROB_BASE * 80 / 100);
19771 iter = ix86_zero_extend_to_Pmode (iter);
19772 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
19773 true, OPTAB_LIB_WIDEN);
19774 if (tmp != destptr)
19775 emit_move_insn (destptr, tmp);
19778 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
19779 true, OPTAB_LIB_WIDEN);
19781 emit_move_insn (srcptr, tmp);
19783 emit_label (out_label);
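/* Standalone sketch (hypothetical) of the loop shape generated above:
   the byte count is rounded down to whole unrolled chunks, and the
   remaining COUNT & (PIECE - 1) bytes are left for the epilogue.  */

static void
movmem_loop_sketch (char *dest, const char *src, unsigned long count)
{
  const unsigned long piece = 16;	/* mode size * unroll, say  */
  unsigned long size = count & ~(piece - 1);
  unsigned long i, j;
  for (i = 0; i < size; i += piece)
    for (j = 0; j < piece; j++)		/* the unrolled body  */
      dest[i + j] = src[i + j];
  /* Both pointers are then advanced by SIZE for the epilogue.  */
}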
19786 /* Output "rep; mov" instruction.
19787 Arguments have same meaning as for previous function */
19789 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
19790 rtx destptr, rtx srcptr,
19792 enum machine_mode mode)
19797 HOST_WIDE_INT rounded_count;
19799 /* If the size is known, it is shorter to use rep movs. */
19800 if (mode == QImode && CONST_INT_P (count)
19801 && !(INTVAL (count) & 3))
19804 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
19805 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
19806 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
19807 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
19808 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
19809 if (mode != QImode)
19811 destexp = gen_rtx_ASHIFT (Pmode, countreg,
19812 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
19813 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
19814 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
19815 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
19816 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
19820 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
19821 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
19823 if (CONST_INT_P (count))
19825 rounded_count = (INTVAL (count)
19826 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
19827 destmem = shallow_copy_rtx (destmem);
19828 srcmem = shallow_copy_rtx (srcmem);
19829 set_mem_size (destmem, rounded_count);
19830 set_mem_size (srcmem, rounded_count);
19834 if (MEM_SIZE_KNOWN_P (destmem))
19835 clear_mem_size (destmem);
19836 if (MEM_SIZE_KNOWN_P (srcmem))
19837 clear_mem_size (srcmem);
19839 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
19843 /* Output "rep; stos" instruction.
19844 Arguments have same meaning as for previous function */
19846 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
19847 rtx count, enum machine_mode mode,
19852 HOST_WIDE_INT rounded_count;
19854 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
19855 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
19856 value = force_reg (mode, gen_lowpart (mode, value));
19857 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
19858 if (mode != QImode)
19860 destexp = gen_rtx_ASHIFT (Pmode, countreg,
19861 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
19862 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
19865 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
19866 if (orig_value == const0_rtx && CONST_INT_P (count))
19868 rounded_count = (INTVAL (count)
19869 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
19870 destmem = shallow_copy_rtx (destmem);
19871 set_mem_size (destmem, rounded_count);
19873 else if (MEM_SIZE_KNOWN_P (destmem))
19874 clear_mem_size (destmem);
19875 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
19879 emit_strmov (rtx destmem, rtx srcmem,
19880 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
19882 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
19883 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
19884 emit_insn (gen_strmov (destptr, dest, srcptr, src));
19887 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
19889 expand_movmem_epilogue (rtx destmem, rtx srcmem,
19890 rtx destptr, rtx srcptr, rtx count, int max_size)
19893 if (CONST_INT_P (count))
19895 HOST_WIDE_INT countval = INTVAL (count);
19898 if ((countval & 0x10) && max_size > 16)
19902 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
19903 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
19906 gcc_unreachable ();
19909 if ((countval & 0x08) && max_size > 8)
19912 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
19915 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
19916 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
19920 if ((countval & 0x04) && max_size > 4)
19922 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
19925 if ((countval & 0x02) && max_size > 2)
19927 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
19930 if ((countval & 0x01) && max_size > 1)
19932 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
19939 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
19940 count, 1, OPTAB_DIRECT);
19941 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
19942 count, QImode, 1, 4);
19946 /* When there are stringops, we can cheaply increase dest and src pointers.
19947 Otherwise we save code size by maintaining an offset (zero is readily
19948 available from the preceding rep operation) and using x86 addressing modes. */
19950 if (TARGET_SINGLE_STRINGOP)
19954 rtx label = ix86_expand_aligntest (count, 4, true);
19955 src = change_address (srcmem, SImode, srcptr);
19956 dest = change_address (destmem, SImode, destptr);
19957 emit_insn (gen_strmov (destptr, dest, srcptr, src));
19958 emit_label (label);
19959 LABEL_NUSES (label) = 1;
19963 rtx label = ix86_expand_aligntest (count, 2, true);
19964 src = change_address (srcmem, HImode, srcptr);
19965 dest = change_address (destmem, HImode, destptr);
19966 emit_insn (gen_strmov (destptr, dest, srcptr, src));
19967 emit_label (label);
19968 LABEL_NUSES (label) = 1;
19972 rtx label = ix86_expand_aligntest (count, 1, true);
19973 src = change_address (srcmem, QImode, srcptr);
19974 dest = change_address (destmem, QImode, destptr);
19975 emit_insn (gen_strmov (destptr, dest, srcptr, src));
19976 emit_label (label);
19977 LABEL_NUSES (label) = 1;
19982 rtx offset = force_reg (Pmode, const0_rtx);
19987 rtx label = ix86_expand_aligntest (count, 4, true);
19988 src = change_address (srcmem, SImode, srcptr);
19989 dest = change_address (destmem, SImode, destptr);
19990 emit_move_insn (dest, src);
19991 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
19992 true, OPTAB_LIB_WIDEN);
19994 emit_move_insn (offset, tmp);
19995 emit_label (label);
19996 LABEL_NUSES (label) = 1;
20000 rtx label = ix86_expand_aligntest (count, 2, true);
20001 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20002 src = change_address (srcmem, HImode, tmp);
20003 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20004 dest = change_address (destmem, HImode, tmp);
20005 emit_move_insn (dest, src);
20006 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
20007 true, OPTAB_LIB_WIDEN);
20009 emit_move_insn (offset, tmp);
20010 emit_label (label);
20011 LABEL_NUSES (label) = 1;
20015 rtx label = ix86_expand_aligntest (count, 1, true);
20016 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
20017 src = change_address (srcmem, QImode, tmp);
20018 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
20019 dest = change_address (destmem, QImode, tmp);
20020 emit_move_insn (dest, src);
20021 emit_label (label);
20022 LABEL_NUSES (label) = 1;
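/* Standalone sketch (hypothetical) of the constant-count epilogue
   above: each set bit of the residual count selects exactly one move
   of the corresponding power-of-two size, so at most four moves copy
   any remainder below 16 bytes.  */

static void
movmem_epilogue_sketch (char *dest, const char *src, unsigned int count)
{
  unsigned int offset = 0, i;
  if (count & 8)
    {
      for (i = 0; i < 8; i++)
	dest[offset + i] = src[offset + i];
      offset += 8;
    }
  if (count & 4)
    {
      for (i = 0; i < 4; i++)
	dest[offset + i] = src[offset + i];
      offset += 4;
    }
  if (count & 2)
    {
      dest[offset] = src[offset];
      dest[offset + 1] = src[offset + 1];
      offset += 2;
    }
  if (count & 1)
    dest[offset] = src[offset];
}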
20027 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
20029 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
20030 rtx count, int max_size)
20033 expand_simple_binop (counter_mode (count), AND, count,
20034 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
20035 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
20036 gen_lowpart (QImode, value), count, QImode,
20040 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
20042 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
20046 if (CONST_INT_P (count))
20048 HOST_WIDE_INT countval = INTVAL (count);
20051 if ((countval & 0x10) && max_size > 16)
20055 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20056 emit_insn (gen_strset (destptr, dest, value));
20057 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
20058 emit_insn (gen_strset (destptr, dest, value));
20061 gcc_unreachable ();
20064 if ((countval & 0x08) && max_size > 8)
20068 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
20069 emit_insn (gen_strset (destptr, dest, value));
20073 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20074 emit_insn (gen_strset (destptr, dest, value));
20075 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
20076 emit_insn (gen_strset (destptr, dest, value));
20080 if ((countval & 0x04) && max_size > 4)
20082 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
20083 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20086 if ((countval & 0x02) && max_size > 2)
20088 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
20089 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20092 if ((countval & 0x01) && max_size > 1)
20094 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
20095 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
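/* For illustration: a constant countval of 7 with max_size 8 emits an
   SImode, an HImode and a QImode store of the promoted VALUE
   (4 + 2 + 1 = 7 bytes), all addressed relative to DESTPTR.  */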
20102 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
20107 rtx label = ix86_expand_aligntest (count, 16, true);
20110 dest = change_address (destmem, DImode, destptr);
20111 emit_insn (gen_strset (destptr, dest, value));
20112 emit_insn (gen_strset (destptr, dest, value));
20116 dest = change_address (destmem, SImode, destptr);
20117 emit_insn (gen_strset (destptr, dest, value));
20118 emit_insn (gen_strset (destptr, dest, value));
20119 emit_insn (gen_strset (destptr, dest, value));
20120 emit_insn (gen_strset (destptr, dest, value));
20122 emit_label (label);
20123 LABEL_NUSES (label) = 1;
20127 rtx label = ix86_expand_aligntest (count, 8, true);
20130 dest = change_address (destmem, DImode, destptr);
20131 emit_insn (gen_strset (destptr, dest, value));
20135 dest = change_address (destmem, SImode, destptr);
20136 emit_insn (gen_strset (destptr, dest, value));
20137 emit_insn (gen_strset (destptr, dest, value));
20139 emit_label (label);
20140 LABEL_NUSES (label) = 1;
20144 rtx label = ix86_expand_aligntest (count, 4, true);
20145 dest = change_address (destmem, SImode, destptr);
20146 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
20147 emit_label (label);
20148 LABEL_NUSES (label) = 1;
20152 rtx label = ix86_expand_aligntest (count, 2, true);
20153 dest = change_address (destmem, HImode, destptr);
20154 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
20155 emit_label (label);
20156 LABEL_NUSES (label) = 1;
20160 rtx label = ix86_expand_aligntest (count, 1, true);
20161 dest = change_address (destmem, QImode, destptr);
20162 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
20163 emit_label (label);
20164 LABEL_NUSES (label) = 1;
20168 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN, to
20169 DESIRED_ALIGNMENT. */
20171 expand_movmem_prologue (rtx destmem, rtx srcmem,
20172 rtx destptr, rtx srcptr, rtx count,
20173 int align, int desired_alignment)
20175 if (align <= 1 && desired_alignment > 1)
20177 rtx label = ix86_expand_aligntest (destptr, 1, false);
20178 srcmem = change_address (srcmem, QImode, srcptr);
20179 destmem = change_address (destmem, QImode, destptr);
20180 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20181 ix86_adjust_counter (count, 1);
20182 emit_label (label);
20183 LABEL_NUSES (label) = 1;
20185 if (align <= 2 && desired_alignment > 2)
20187 rtx label = ix86_expand_aligntest (destptr, 2, false);
20188 srcmem = change_address (srcmem, HImode, srcptr);
20189 destmem = change_address (destmem, HImode, destptr);
20190 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20191 ix86_adjust_counter (count, 2);
20192 emit_label (label);
20193 LABEL_NUSES (label) = 1;
20195 if (align <= 4 && desired_alignment > 4)
20197 rtx label = ix86_expand_aligntest (destptr, 4, false);
20198 srcmem = change_address (srcmem, SImode, srcptr);
20199 destmem = change_address (destmem, SImode, destptr);
20200 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
20201 ix86_adjust_counter (count, 4);
20202 emit_label (label);
20203 LABEL_NUSES (label) = 1;
20205 gcc_assert (desired_alignment <= 8);
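/* For illustration: entering with ALIGN 1 and DESIRED_ALIGNMENT 8, the
   three aligntests above copy at most 1 + 2 + 4 = 7 bytes, each copy
   guarded by the corresponding low bit of DESTPTR, after which the
   destination is 8-byte aligned and COUNT has been adjusted to match.  */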
20208 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
20209 ALIGN_BYTES is how many bytes need to be copied. */
20211 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
20212 int desired_align, int align_bytes)
20215 rtx orig_dst = dst;
20216 rtx orig_src = src;
20218 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
20219 if (src_align_bytes >= 0)
20220 src_align_bytes = desired_align - src_align_bytes;
20221 if (align_bytes & 1)
20223 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20224 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
20226 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20228 if (align_bytes & 2)
20230 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20231 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
20232 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20233 set_mem_align (dst, 2 * BITS_PER_UNIT);
20234 if (src_align_bytes >= 0
20235 && (src_align_bytes & 1) == (align_bytes & 1)
20236 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
20237 set_mem_align (src, 2 * BITS_PER_UNIT);
20239 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20241 if (align_bytes & 4)
20243 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20244 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
20245 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20246 set_mem_align (dst, 4 * BITS_PER_UNIT);
20247 if (src_align_bytes >= 0)
20249 unsigned int src_align = 0;
20250 if ((src_align_bytes & 3) == (align_bytes & 3))
20252 else if ((src_align_bytes & 1) == (align_bytes & 1))
20254 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20255 set_mem_align (src, src_align * BITS_PER_UNIT);
20258 emit_insn (gen_strmov (destreg, dst, srcreg, src));
20260 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20261 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
20262 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20263 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20264 if (src_align_bytes >= 0)
20266 unsigned int src_align = 0;
20267 if ((src_align_bytes & 7) == (align_bytes & 7))
20269 else if ((src_align_bytes & 3) == (align_bytes & 3))
20271 else if ((src_align_bytes & 1) == (align_bytes & 1))
20273 if (src_align > (unsigned int) desired_align)
20274 src_align = desired_align;
20275 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
20276 set_mem_align (src, src_align * BITS_PER_UNIT);
20278 if (MEM_SIZE_KNOWN_P (orig_dst))
20279 set_mem_size (dst, MEM_SIZE (orig_dst) - align_bytes);
20280 if (MEM_SIZE_KNOWN_P (orig_src))
20281 set_mem_size (src, MEM_SIZE (orig_src) - align_bytes);
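/* For illustration: ALIGN_BYTES of 7 emits a QImode, an HImode and an
   SImode copy (1 + 2 + 4 bytes), raising the recorded MEM alignment of
   the destination at each step; the source alignment is raised too, but
   only when its misalignment provably matches the destination's.  */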
20286 /* Store enough bytes at DEST to align DEST, known to be aligned by ALIGN, to
20287 DESIRED_ALIGNMENT. */
20289 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
20290 int align, int desired_alignment)
20292 if (align <= 1 && desired_alignment > 1)
20294 rtx label = ix86_expand_aligntest (destptr, 1, false);
20295 destmem = change_address (destmem, QImode, destptr);
20296 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
20297 ix86_adjust_counter (count, 1);
20298 emit_label (label);
20299 LABEL_NUSES (label) = 1;
20301 if (align <= 2 && desired_alignment > 2)
20303 rtx label = ix86_expand_aligntest (destptr, 2, false);
20304 destmem = change_address (destmem, HImode, destptr);
20305 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
20306 ix86_adjust_counter (count, 2);
20307 emit_label (label);
20308 LABEL_NUSES (label) = 1;
20310 if (align <= 4 && desired_alignment > 4)
20312 rtx label = ix86_expand_aligntest (destptr, 4, false);
20313 destmem = change_address (destmem, SImode, destptr);
20314 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
20315 ix86_adjust_counter (count, 4);
20316 emit_label (label);
20317 LABEL_NUSES (label) = 1;
20319 gcc_assert (desired_alignment <= 8);
20322 /* Store enough bytes at DST to align DST to
20323 DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
20325 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
20326 int desired_align, int align_bytes)
20329 rtx orig_dst = dst;
20330 if (align_bytes & 1)
20332 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
20334 emit_insn (gen_strset (destreg, dst,
20335 gen_lowpart (QImode, value)));
20337 if (align_bytes & 2)
20339 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
20340 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
20341 set_mem_align (dst, 2 * BITS_PER_UNIT);
20343 emit_insn (gen_strset (destreg, dst,
20344 gen_lowpart (HImode, value)));
20346 if (align_bytes & 4)
20348 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
20349 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
20350 set_mem_align (dst, 4 * BITS_PER_UNIT);
20352 emit_insn (gen_strset (destreg, dst,
20353 gen_lowpart (SImode, value)));
20355 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
20356 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
20357 set_mem_align (dst, desired_align * BITS_PER_UNIT);
20358 if (MEM_SIZE_KNOWN_P (orig_dst))
20359 set_mem_size (dst, MEM_SIZE (orig_dst) - align_bytes);
20363 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
20364 static enum stringop_alg
20365 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
20366 int *dynamic_check)
20368 const struct stringop_algs * algs;
20369 bool optimize_for_speed;
20370 /* Algorithms using the rep prefix want at least edi and ecx;
20371 additionally, memset wants eax and memcpy wants esi. Don't
20372 consider such algorithms if the user has appropriated those
20373 registers for their own purposes. */
20374 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
20376 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
20378 #define ALG_USABLE_P(alg) (rep_prefix_usable \
20379 || (alg != rep_prefix_1_byte \
20380 && alg != rep_prefix_4_byte \
20381 && alg != rep_prefix_8_byte))
20382 const struct processor_costs *cost;
20384 /* Even if the string operation call is cold, we still might spend a lot
20385 of time processing large blocks. */
20386 if (optimize_function_for_size_p (cfun)
20387 || (optimize_insn_for_size_p ()
20388 && expected_size != -1 && expected_size < 256))
20389 optimize_for_speed = false;
20391 optimize_for_speed = true;
20393 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
20395 *dynamic_check = -1;
20397 algs = &cost->memset[TARGET_64BIT != 0];
20399 algs = &cost->memcpy[TARGET_64BIT != 0];
20400 if (ix86_stringop_alg != no_stringop && ALG_USABLE_P (ix86_stringop_alg))
20401 return ix86_stringop_alg;
20402 /* rep; movq or rep; movl is the smallest variant. */
20403 else if (!optimize_for_speed)
20405 if (!count || (count & 3))
20406 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
20408 return rep_prefix_usable ? rep_prefix_4_byte : loop;
20410 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
20412 else if (expected_size != -1 && expected_size < 4)
20413 return loop_1_byte;
20414 else if (expected_size != -1)
20417 enum stringop_alg alg = libcall;
20418 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20420 /* We get here if the algorithms that were not libcall-based
20421 were rep-prefix based and we are unable to use rep prefixes
20422 based on global register usage. Break out of the loop and
20423 use the heuristic below. */
20424 if (algs->size[i].max == 0)
20426 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
20428 enum stringop_alg candidate = algs->size[i].alg;
20430 if (candidate != libcall && ALG_USABLE_P (candidate))
20432 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
20433 last non-libcall inline algorithm. */
20434 if (TARGET_INLINE_ALL_STRINGOPS)
20436 /* When the current size is best copied by a libcall, but we
20437 are still forced to inline, run the heuristic below
20438 that will pick code for medium-sized blocks. */
20439 if (alg != libcall)
20443 else if (ALG_USABLE_P (candidate))
20447 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
20449 /* When asked to inline the call anyway, try to pick a meaningful choice.
20450 We look for the maximal size of block that is faster to copy by hand and
20451 take blocks of at most that size, guessing that the average size will
20452 be roughly half of the block.
20454 If this turns out to be bad, we might simply specify the preferred
20455 choice in ix86_costs. */
20456 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20457 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
20460 enum stringop_alg alg;
20462 bool any_alg_usable_p = true;
20464 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
20466 enum stringop_alg candidate = algs->size[i].alg;
20467 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
20469 if (candidate != libcall && candidate
20470 && ALG_USABLE_P (candidate))
20471 max = algs->size[i].max;
20473 /* If there aren't any usable algorithms, then recursing on
20474 smaller sizes isn't going to find anything. Just return the
20475 simple byte-at-a-time copy loop. */
20476 if (!any_alg_usable_p)
20478 /* Pick something reasonable. */
20479 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20480 *dynamic_check = 128;
20481 return loop_1_byte;
20485 alg = decide_alg (count, max / 2, memset, dynamic_check);
20486 gcc_assert (*dynamic_check == -1);
20487 gcc_assert (alg != libcall);
20488 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
20489 *dynamic_check = max;
20492 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
20493 #undef ALG_USABLE_P
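/* Sketch of the table walk above on a hypothetical cost table
     {{24, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}:
   an expected_size of 16 selects loop, 1000 selects rep_prefix_8_byte
   (when rep prefixes are usable), and an unknown size falls through to
   the unknown_size entry, or to libcall when that entry is unusable.  */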
20496 /* Decide on alignment. We know that the operand is already aligned to ALIGN
20497 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
20499 decide_alignment (int align,
20500 enum stringop_alg alg,
20503 int desired_align = 0;
20507 gcc_unreachable ();
20509 case unrolled_loop:
20510 desired_align = GET_MODE_SIZE (Pmode);
20512 case rep_prefix_8_byte:
20515 case rep_prefix_4_byte:
20516 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
20517 copying whole cachelines at once. */
20518 if (TARGET_PENTIUMPRO)
20523 case rep_prefix_1_byte:
20524 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
20525 copying whole cachelines at once. */
20526 if (TARGET_PENTIUMPRO)
20540 if (desired_align < align)
20541 desired_align = align;
20542 if (expected_size != -1 && expected_size < 4)
20543 desired_align = align;
20544 return desired_align;
20547 /* Return the smallest power of 2 greater than VAL. */
20549 smallest_pow2_greater_than (int val)
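/* E.g., smallest_pow2_greater_than (5) is 8 and
   smallest_pow2_greater_than (8) is 16: the result is strictly greater
   than VAL, so masking a count with (result - 1) can preserve any
   residual of up to VAL bytes.  */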
20557 /* Expand string move (memcpy) operation. Use i386 string operations
20558 when profitable. expand_setmem contains similar code. The code
20559 depends upon architecture, block size and alignment, but always has
20560 the same overall structure:
20562 1) Prologue guard: Conditional that jumps up to epilogues for small
20563 blocks that can be handled by epilogue alone. This is faster
20564 but also needed for correctness, since the prologue assumes the block
20565 is larger than the desired alignment.
20567 Optional dynamic check for size and libcall for large
20568 blocks is emitted here too, with -minline-stringops-dynamically.
20570 2) Prologue: copy first few bytes in order to get destination
20571 aligned to DESIRED_ALIGN. It is emitted only when ALIGN is less
20572 than DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be
20573 copied. We emit either a jump tree on power of two sized
20574 blocks, or a byte loop.
20576 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
20577 with the specified algorithm.
20579 4) Epilogue: code copying tail of the block that is too small to be
20580 handled by main body (or up to size guarded by prologue guard). */
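/* For a rep_prefix_8_byte memcpy on x86-64 the emitted skeleton is
   roughly (an illustrative sketch, not the exact insn sequence):

	cmpq	$8, %rcx		# step 1: prologue guard
	jb	.Lepilogue
	...				# step 2: align destination
	shrq	$3, %rcx		# step 3: main body
	rep movsq
   .Lepilogue:
	...				# step 4: tail of at most 7 bytes
 */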
20583 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
20584 rtx expected_align_exp, rtx expected_size_exp)
20590 rtx jump_around_label = NULL;
20591 HOST_WIDE_INT align = 1;
20592 unsigned HOST_WIDE_INT count = 0;
20593 HOST_WIDE_INT expected_size = -1;
20594 int size_needed = 0, epilogue_size_needed;
20595 int desired_align = 0, align_bytes = 0;
20596 enum stringop_alg alg;
20598 bool need_zero_guard = false;
20600 if (CONST_INT_P (align_exp))
20601 align = INTVAL (align_exp);
20602 /* i386 can do misaligned access at a reasonably increased cost. */
20603 if (CONST_INT_P (expected_align_exp)
20604 && INTVAL (expected_align_exp) > align)
20605 align = INTVAL (expected_align_exp);
20606 /* ALIGN is the minimum of destination and source alignment, but we care here
20607 just about destination alignment. */
20608 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
20609 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
20611 if (CONST_INT_P (count_exp))
20612 count = expected_size = INTVAL (count_exp);
20613 if (CONST_INT_P (expected_size_exp) && count == 0)
20614 expected_size = INTVAL (expected_size_exp);
20616 /* Make sure we don't need to care about overflow later on. */
20617 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
20620 /* Step 0: Decide on preferred algorithm, desired alignment and
20621 size of chunks to be copied by main loop. */
20623 alg = decide_alg (count, expected_size, false, &dynamic_check);
20624 desired_align = decide_alignment (align, alg, expected_size);
20626 if (!TARGET_ALIGN_STRINGOPS)
20627 align = desired_align;
20629 if (alg == libcall)
20631 gcc_assert (alg != no_stringop);
20633 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
20634 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
20635 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
20640 gcc_unreachable ();
20642 need_zero_guard = true;
20643 size_needed = GET_MODE_SIZE (Pmode);
20645 case unrolled_loop:
20646 need_zero_guard = true;
20647 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
20649 case rep_prefix_8_byte:
20652 case rep_prefix_4_byte:
20655 case rep_prefix_1_byte:
20659 need_zero_guard = true;
20664 epilogue_size_needed = size_needed;
20666 /* Step 1: Prologue guard. */
20668 /* Alignment code needs count to be in register. */
20669 if (CONST_INT_P (count_exp) && desired_align > align)
20671 if (INTVAL (count_exp) > desired_align
20672 && INTVAL (count_exp) > size_needed)
20675 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
20676 if (align_bytes <= 0)
20679 align_bytes = desired_align - align_bytes;
20681 if (align_bytes == 0)
20682 count_exp = force_reg (counter_mode (count_exp), count_exp);
20684 gcc_assert (desired_align >= 1 && align >= 1);
20686 /* Ensure that alignment prologue won't copy past end of block. */
20687 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
20689 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
20690 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
20691 Make sure it is power of 2. */
20692 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
20696 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
20698 /* If main algorithm works on QImode, no epilogue is needed.
20699 For small sizes just don't align anything. */
20700 if (size_needed == 1)
20701 desired_align = align;
20708 label = gen_label_rtx ();
20709 emit_cmp_and_jump_insns (count_exp,
20710 GEN_INT (epilogue_size_needed),
20711 LTU, 0, counter_mode (count_exp), 1, label);
20712 if (expected_size == -1 || expected_size < epilogue_size_needed)
20713 predict_jump (REG_BR_PROB_BASE * 60 / 100);
20715 predict_jump (REG_BR_PROB_BASE * 20 / 100);
20719 /* Emit code to decide at runtime whether a library call or inline code should be used. */
20721 if (dynamic_check != -1)
20723 if (CONST_INT_P (count_exp))
20725 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
20727 emit_block_move_via_libcall (dst, src, count_exp, false);
20728 count_exp = const0_rtx;
20734 rtx hot_label = gen_label_rtx ();
20735 jump_around_label = gen_label_rtx ();
20736 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
20737 LEU, 0, GET_MODE (count_exp), 1, hot_label);
20738 predict_jump (REG_BR_PROB_BASE * 90 / 100);
20739 emit_block_move_via_libcall (dst, src, count_exp, false);
20740 emit_jump (jump_around_label);
20741 emit_label (hot_label);
20745 /* Step 2: Alignment prologue. */
20747 if (desired_align > align)
20749 if (align_bytes == 0)
20751 /* Except for the first move in the epilogue, we no longer know
20752 the constant offset in aliasing info. It doesn't seem worth
20753 the pain to maintain it for the first move, so throw away the info early. */
20755 src = change_address (src, BLKmode, srcreg);
20756 dst = change_address (dst, BLKmode, destreg);
20757 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
20762 /* If we know how many bytes need to be stored before dst is
20763 sufficiently aligned, maintain aliasing info accurately. */
20764 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
20765 desired_align, align_bytes);
20766 count_exp = plus_constant (count_exp, -align_bytes);
20767 count -= align_bytes;
20769 if (need_zero_guard
20770 && (count < (unsigned HOST_WIDE_INT) size_needed
20771 || (align_bytes == 0
20772 && count < ((unsigned HOST_WIDE_INT) size_needed
20773 + desired_align - align))))
20775 /* It is possible that we copied enough so the main loop will not execute. */
20777 gcc_assert (size_needed > 1);
20778 if (label == NULL_RTX)
20779 label = gen_label_rtx ();
20780 emit_cmp_and_jump_insns (count_exp,
20781 GEN_INT (size_needed),
20782 LTU, 0, counter_mode (count_exp), 1, label);
20783 if (expected_size == -1
20784 || expected_size < (desired_align - align) / 2 + size_needed)
20785 predict_jump (REG_BR_PROB_BASE * 20 / 100);
20787 predict_jump (REG_BR_PROB_BASE * 60 / 100);
20790 if (label && size_needed == 1)
20792 emit_label (label);
20793 LABEL_NUSES (label) = 1;
20795 epilogue_size_needed = 1;
20797 else if (label == NULL_RTX)
20798 epilogue_size_needed = size_needed;
20800 /* Step 3: Main loop. */
20806 gcc_unreachable ();
20808 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
20809 count_exp, QImode, 1, expected_size);
20812 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
20813 count_exp, Pmode, 1, expected_size);
20815 case unrolled_loop:
20816 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
20817 registers for 4 temporaries anyway. */
20818 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
20819 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
20822 case rep_prefix_8_byte:
20823 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
20826 case rep_prefix_4_byte:
20827 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
20830 case rep_prefix_1_byte:
20831 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
20835 /* Properly adjust the offsets of src and dest memory for aliasing. */
20836 if (CONST_INT_P (count_exp))
20838 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
20839 (count / size_needed) * size_needed);
20840 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
20841 (count / size_needed) * size_needed);
20845 src = change_address (src, BLKmode, srcreg);
20846 dst = change_address (dst, BLKmode, destreg);
20849 /* Step 4: Epilogue to copy the remaining bytes. */
20853 /* When the main loop is done, COUNT_EXP might hold the original count,
20854 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
20855 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
20856 bytes. Compensate if needed. */
20858 if (size_needed < epilogue_size_needed)
20861 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
20862 GEN_INT (size_needed - 1), count_exp, 1,
20864 if (tmp != count_exp)
20865 emit_move_insn (count_exp, tmp);
20867 emit_label (label);
20868 LABEL_NUSES (label) = 1;
20871 if (count_exp != const0_rtx && epilogue_size_needed > 1)
20872 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
20873 epilogue_size_needed);
20874 if (jump_around_label)
20875 emit_label (jump_around_label);
20879 /* Helper function for memset. For QImode value 0xXY produce
20880 0xXYXYXYXY of the width specified by MODE. This is essentially
20881 a * 0x01010101, but we can do slightly better than
20882 synth_mult by unwinding the sequence by hand on CPUs with slow multiply. */
20885 promote_duplicated_reg (enum machine_mode mode, rtx val)
20887 enum machine_mode valmode = GET_MODE (val);
20889 int nops = mode == DImode ? 3 : 2;
20891 gcc_assert (mode == SImode || mode == DImode);
20892 if (val == const0_rtx)
20893 return copy_to_mode_reg (mode, const0_rtx);
20894 if (CONST_INT_P (val))
20896 HOST_WIDE_INT v = INTVAL (val) & 255;
20900 if (mode == DImode)
20901 v |= (v << 16) << 16;
20902 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
20905 if (valmode == VOIDmode)
20907 if (valmode != QImode)
20908 val = gen_lowpart (QImode, val);
20909 if (mode == QImode)
20911 if (!TARGET_PARTIAL_REG_STALL)
20913 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
20914 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
20915 <= (ix86_cost->shift_const + ix86_cost->add) * nops
20916 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
20918 rtx reg = convert_modes (mode, QImode, val, true);
20919 tmp = promote_duplicated_reg (mode, const1_rtx);
20920 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
20925 rtx reg = convert_modes (mode, QImode, val, true);
20927 if (!TARGET_PARTIAL_REG_STALL)
20928 if (mode == SImode)
20929 emit_insn (gen_movsi_insv_1 (reg, reg));
20931 emit_insn (gen_movdi_insv_1 (reg, reg));
20934 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
20935 NULL, 1, OPTAB_DIRECT);
20937 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
20939 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
20940 NULL, 1, OPTAB_DIRECT);
20941 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
20942 if (mode == SImode) return reg;
20944 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
20945 NULL, 1, OPTAB_DIRECT);
20946 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
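/* The unwound sequence above is, in effect:
     reg |= reg << 8;    0x000000XY -> 0x0000XYXY
     reg |= reg << 16;   0x0000XYXY -> 0xXYXYXYXY
     reg |= reg << 32;   (DImode only, after the early SImode return)
   i.e. NOPS shift-and-or pairs replace the multiply by 0x01010101.  */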
20951 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
20952 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
20953 alignment from ALIGN to DESIRED_ALIGN. */
20955 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
20960 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
20961 promoted_val = promote_duplicated_reg (DImode, val);
20962 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
20963 promoted_val = promote_duplicated_reg (SImode, val);
20964 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
20965 promoted_val = promote_duplicated_reg (HImode, val);
20967 promoted_val = val;
20969 return promoted_val;
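/* E.g., size_needed of 8 (a DImode copying loop) promotes VAL to DImode
   on 64-bit targets, while size_needed of 4 with matching alignment
   needs only an SImode promotion; a pure byte loop leaves VAL
   unpromoted.  */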
20972 /* Expand string clear operation (bzero). Use i386 string operations when
20973 profitable. See expand_movmem comment for explanation of individual
20974 steps performed. */
20976 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
20977 rtx expected_align_exp, rtx expected_size_exp)
20982 rtx jump_around_label = NULL;
20983 HOST_WIDE_INT align = 1;
20984 unsigned HOST_WIDE_INT count = 0;
20985 HOST_WIDE_INT expected_size = -1;
20986 int size_needed = 0, epilogue_size_needed;
20987 int desired_align = 0, align_bytes = 0;
20988 enum stringop_alg alg;
20989 rtx promoted_val = NULL;
20990 bool force_loopy_epilogue = false;
20992 bool need_zero_guard = false;
20994 if (CONST_INT_P (align_exp))
20995 align = INTVAL (align_exp);
20996 /* i386 can do misaligned access at a reasonably increased cost. */
20997 if (CONST_INT_P (expected_align_exp)
20998 && INTVAL (expected_align_exp) > align)
20999 align = INTVAL (expected_align_exp);
21000 if (CONST_INT_P (count_exp))
21001 count = expected_size = INTVAL (count_exp);
21002 if (CONST_INT_P (expected_size_exp) && count == 0)
21003 expected_size = INTVAL (expected_size_exp);
21005 /* Make sure we don't need to care about overflow later on. */
21006 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
21009 /* Step 0: Decide on preferred algorithm, desired alignment and
21010 size of chunks to be copied by main loop. */
21012 alg = decide_alg (count, expected_size, true, &dynamic_check);
21013 desired_align = decide_alignment (align, alg, expected_size);
21015 if (!TARGET_ALIGN_STRINGOPS)
21016 align = desired_align;
21018 if (alg == libcall)
21020 gcc_assert (alg != no_stringop);
21022 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
21023 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
21028 gcc_unreachable ();
21030 need_zero_guard = true;
21031 size_needed = GET_MODE_SIZE (Pmode);
21033 case unrolled_loop:
21034 need_zero_guard = true;
21035 size_needed = GET_MODE_SIZE (Pmode) * 4;
21037 case rep_prefix_8_byte:
21040 case rep_prefix_4_byte:
21043 case rep_prefix_1_byte:
21047 need_zero_guard = true;
21051 epilogue_size_needed = size_needed;
21053 /* Step 1: Prologue guard. */
21055 /* Alignment code needs count to be in register. */
21056 if (CONST_INT_P (count_exp) && desired_align > align)
21058 if (INTVAL (count_exp) > desired_align
21059 && INTVAL (count_exp) > size_needed)
21062 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
21063 if (align_bytes <= 0)
21066 align_bytes = desired_align - align_bytes;
21068 if (align_bytes == 0)
21070 enum machine_mode mode = SImode;
21071 if (TARGET_64BIT && (count & ~0xffffffff))
21073 count_exp = force_reg (mode, count_exp);
21076 /* Do the cheap promotion to allow better CSE across the
21077 main loop and epilogue (i.e. one load of the big constant in
21078 front of all code). */
21079 if (CONST_INT_P (val_exp))
21080 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21081 desired_align, align);
21082 /* Ensure that alignment prologue won't copy past end of block. */
21083 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
21085 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
21086 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
21087 Make sure it is power of 2. */
21088 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
21090 /* To improve performance of small blocks, we jump around the VAL
21091 promoting code. This means that if the promoted VAL is not constant,
21092 we might not use it in the epilogue and have to use the byte loop variant. */
21094 if (epilogue_size_needed > 2 && !promoted_val)
21095 force_loopy_epilogue = true;
21098 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
21100 /* If main algorithm works on QImode, no epilogue is needed.
21101 For small sizes just don't align anything. */
21102 if (size_needed == 1)
21103 desired_align = align;
21110 label = gen_label_rtx ();
21111 emit_cmp_and_jump_insns (count_exp,
21112 GEN_INT (epilogue_size_needed),
21113 LTU, 0, counter_mode (count_exp), 1, label);
21114 if (expected_size == -1 || expected_size <= epilogue_size_needed)
21115 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21117 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21120 if (dynamic_check != -1)
21122 rtx hot_label = gen_label_rtx ();
21123 jump_around_label = gen_label_rtx ();
21124 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
21125 LEU, 0, counter_mode (count_exp), 1, hot_label);
21126 predict_jump (REG_BR_PROB_BASE * 90 / 100);
21127 set_storage_via_libcall (dst, count_exp, val_exp, false);
21128 emit_jump (jump_around_label);
21129 emit_label (hot_label);
21132 /* Step 2: Alignment prologue. */
21134 /* Do the expensive promotion once we branched off the small blocks. */
21136 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
21137 desired_align, align);
21138 gcc_assert (desired_align >= 1 && align >= 1);
21140 if (desired_align > align)
21142 if (align_bytes == 0)
21144 /* Except for the first move in the epilogue, we no longer know
21145 the constant offset in aliasing info. It doesn't seem worth
21146 the pain to maintain it for the first move, so throw away the info early. */
21148 dst = change_address (dst, BLKmode, destreg);
21149 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
21154 /* If we know how many bytes need to be stored before dst is
21155 sufficiently aligned, maintain aliasing info accurately. */
21156 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
21157 desired_align, align_bytes);
21158 count_exp = plus_constant (count_exp, -align_bytes);
21159 count -= align_bytes;
21161 if (need_zero_guard
21162 && (count < (unsigned HOST_WIDE_INT) size_needed
21163 || (align_bytes == 0
21164 && count < ((unsigned HOST_WIDE_INT) size_needed
21165 + desired_align - align))))
21167 /* It is possible that we copied enough so the main loop will not execute. */
21169 gcc_assert (size_needed > 1);
21170 if (label == NULL_RTX)
21171 label = gen_label_rtx ();
21172 emit_cmp_and_jump_insns (count_exp,
21173 GEN_INT (size_needed),
21174 LTU, 0, counter_mode (count_exp), 1, label);
21175 if (expected_size == -1
21176 || expected_size < (desired_align - align) / 2 + size_needed)
21177 predict_jump (REG_BR_PROB_BASE * 20 / 100);
21179 predict_jump (REG_BR_PROB_BASE * 60 / 100);
21182 if (label && size_needed == 1)
21184 emit_label (label);
21185 LABEL_NUSES (label) = 1;
21187 promoted_val = val_exp;
21188 epilogue_size_needed = 1;
21190 else if (label == NULL_RTX)
21191 epilogue_size_needed = size_needed;
21193 /* Step 3: Main loop. */
21199 gcc_unreachable ();
21201 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21202 count_exp, QImode, 1, expected_size);
21205 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21206 count_exp, Pmode, 1, expected_size);
21208 case unrolled_loop:
21209 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
21210 count_exp, Pmode, 4, expected_size);
21212 case rep_prefix_8_byte:
21213 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21216 case rep_prefix_4_byte:
21217 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21220 case rep_prefix_1_byte:
21221 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
21225 /* Properly adjust the offsets of src and dest memory for aliasing. */
21226 if (CONST_INT_P (count_exp))
21227 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
21228 (count / size_needed) * size_needed);
21230 dst = change_address (dst, BLKmode, destreg);
21232 /* Step 4: Epilogue to copy the remaining bytes. */
21236 /* When the main loop is done, COUNT_EXP might hold the original count,
21237 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
21238 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
21239 bytes. Compensate if needed. */
21241 if (size_needed < epilogue_size_needed)
21244 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
21245 GEN_INT (size_needed - 1), count_exp, 1,
21247 if (tmp != count_exp)
21248 emit_move_insn (count_exp, tmp);
21250 emit_label (label);
21251 LABEL_NUSES (label) = 1;
21254 if (count_exp != const0_rtx && epilogue_size_needed > 1)
21256 if (force_loopy_epilogue)
21257 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
21258 epilogue_size_needed);
21260 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
21261 epilogue_size_needed);
21263 if (jump_around_label)
21264 emit_label (jump_around_label);
21268 /* Expand the appropriate insns for doing strlen if not just doing repnz; scasb.
21271 out = result, initialized with the start address
21272 align_rtx = alignment of the address.
21273 scratch = scratch register, initialized with the start address when
21274 not aligned, otherwise undefined
21276 This is just the body. It needs the initializations mentioned above and
21277 some address computing at the end. These things are done in i386.md. */
21280 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
21284 rtx align_2_label = NULL_RTX;
21285 rtx align_3_label = NULL_RTX;
21286 rtx align_4_label = gen_label_rtx ();
21287 rtx end_0_label = gen_label_rtx ();
21289 rtx tmpreg = gen_reg_rtx (SImode);
21290 rtx scratch = gen_reg_rtx (SImode);
21294 if (CONST_INT_P (align_rtx))
21295 align = INTVAL (align_rtx);
21297 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
21299 /* Is there a known alignment and is it less than 4? */
21302 rtx scratch1 = gen_reg_rtx (Pmode);
21303 emit_move_insn (scratch1, out);
21304 /* Is there a known alignment and is it not 2? */
21307 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
21308 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
21310 /* Leave just the 3 lower bits. */
21311 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
21312 NULL_RTX, 0, OPTAB_WIDEN);
21314 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21315 Pmode, 1, align_4_label);
21316 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
21317 Pmode, 1, align_2_label);
21318 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
21319 Pmode, 1, align_3_label);
21323 /* Since the alignment is 2, we have to check 2 or 0 bytes;
21324 check whether it is aligned to a 4-byte boundary. */
21326 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
21327 NULL_RTX, 0, OPTAB_WIDEN);
21329 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
21330 Pmode, 1, align_4_label);
21333 mem = change_address (src, QImode, out);
21335 /* Now compare the bytes. */
21337 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
21338 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
21339 QImode, 1, end_0_label);
21341 /* Increment the address. */
21342 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21344 /* Not needed with an alignment of 2 */
21347 emit_label (align_2_label);
21349 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21352 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21354 emit_label (align_3_label);
21357 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
21360 emit_insn (ix86_gen_add3 (out, out, const1_rtx));
21363 /* Generate loop to check 4 bytes at a time. It is not a good idea to
21364 align this loop. It gives only huge programs, but does not help to speed up. */
21366 emit_label (align_4_label);
21368 mem = change_address (src, SImode, out);
21369 emit_move_insn (scratch, mem);
21370 emit_insn (ix86_gen_add3 (out, out, GEN_INT (4)));
21372 /* This formula yields a nonzero result iff one of the bytes is zero.
21373 This saves three branches inside the loop and many cycles. */
21375 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
21376 emit_insn (gen_one_cmplsi2 (scratch, scratch));
21377 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
21378 emit_insn (gen_andsi3 (tmpreg, tmpreg,
21379 gen_int_mode (0x80808080, SImode)));
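/* Worked example of the formula for scratch = 0x41410041, which
   contains one zero byte:
     scratch - 0x01010101 = 0x403fff40   (the zero byte borrows)
     ~scratch             = 0xbebeffbe
     and of the two       = 0x003eff00
     and 0x80808080       = 0x00008000   -> nonzero, so we fall out
   of the loop; a word with no zero byte always yields 0 here.  */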
21380 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
21385 rtx reg = gen_reg_rtx (SImode);
21386 rtx reg2 = gen_reg_rtx (Pmode);
21387 emit_move_insn (reg, tmpreg);
21388 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
21390 /* If zero is not in the first two bytes, move two bytes forward. */
21391 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21392 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21393 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21394 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
21395 gen_rtx_IF_THEN_ELSE (SImode, tmp,
21398 /* Emit lea manually to avoid clobbering of flags. */
21399 emit_insn (gen_rtx_SET (SImode, reg2,
21400 gen_rtx_PLUS (Pmode, out, const2_rtx)));
21402 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21403 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
21404 emit_insn (gen_rtx_SET (VOIDmode, out,
21405 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
21411 rtx end_2_label = gen_label_rtx ();
21412 /* Is zero in the first two bytes? */
21414 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
21415 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
21416 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
21417 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
21418 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
21420 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
21421 JUMP_LABEL (tmp) = end_2_label;
21423 /* Not in the first two. Move two bytes forward. */
21424 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
21425 emit_insn (ix86_gen_add3 (out, out, const2_rtx));
21427 emit_label (end_2_label);
21431 /* Avoid a branch in fixing the byte. */
21432 tmpreg = gen_lowpart (QImode, tmpreg);
21433 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
21434 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
21435 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
21436 emit_insn (ix86_gen_sub3_carry (out, out, GEN_INT (3), tmp, cmp));
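/* The add above shifts bit 7 of the flag byte into the carry flag:
   carry is set iff the zero byte is the first of the remaining pair.
   The sbb-style subtract then removes either 3 or 4 from OUT,
   selecting the correct end-of-string address without a branch.  */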
21438 emit_label (end_0_label);
21441 /* Expand strlen. */
21444 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
21446 rtx addr, scratch1, scratch2, scratch3, scratch4;
21448 /* The generic case of the strlen expander is long. Avoid its
21449 expansion unless TARGET_INLINE_ALL_STRINGOPS. */
21451 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21452 && !TARGET_INLINE_ALL_STRINGOPS
21453 && !optimize_insn_for_size_p ()
21454 && (!CONST_INT_P (align) || INTVAL (align) < 4))
21457 addr = force_reg (Pmode, XEXP (src, 0));
21458 scratch1 = gen_reg_rtx (Pmode);
21460 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
21461 && !optimize_insn_for_size_p ())
21463 /* Well it seems that some optimizer does not combine a call like
21464 foo(strlen(bar), strlen(bar));
21465 when the move and the subtraction are done here. It does calculate
21466 the length just once when these instructions are done inside of
21467 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
21468 often used and I use one fewer register for the lifetime of
21469 output_strlen_unroll() this is better. */
21471 emit_move_insn (out, addr);
21473 ix86_expand_strlensi_unroll_1 (out, src, align);
21475 /* strlensi_unroll_1 returns the address of the zero at the end of
21476 the string, like memchr(), so compute the length by subtracting
21477 the start address. */
21478 emit_insn (ix86_gen_sub3 (out, out, addr));
21484 /* Can't use this if the user has appropriated eax, ecx, or edi. */
21485 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
21488 scratch2 = gen_reg_rtx (Pmode);
21489 scratch3 = gen_reg_rtx (Pmode);
21490 scratch4 = force_reg (Pmode, constm1_rtx);
21492 emit_move_insn (scratch3, addr);
21493 eoschar = force_reg (QImode, eoschar);
21495 src = replace_equiv_address_nv (src, scratch3);
21497 /* If .md starts supporting :P, this can be done in .md. */
21498 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
21499 scratch4), UNSPEC_SCAS);
21500 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
21501 emit_insn (ix86_gen_one_cmpl2 (scratch2, scratch1));
21502 emit_insn (ix86_gen_add3 (out, scratch2, constm1_rtx));
21507 /* For a given symbol (function), construct code to compute the address of its PLT
21508 entry in the large x86-64 PIC model. */
21510 construct_plt_address (rtx symbol)
21512 rtx tmp = gen_reg_rtx (Pmode);
21513 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
21515 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
21516 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
21518 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
21519 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
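/* In assembler terms the sequence produced is roughly:
	movabs	$symbol@PLTOFF, <tmp>
	add	<pic_offset_table>, <tmp>
   leaving <tmp> holding the absolute address of the symbol's PLT
   entry; <tmp> and the register names are illustrative.  */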
21524 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
21526 rtx pop, bool sibcall)
21528 /* We need to represent that SI and DI registers are clobbered by SYSV calls. */
21530 static int clobbered_registers[] = {
21531 XMM6_REG, XMM7_REG, XMM8_REG,
21532 XMM9_REG, XMM10_REG, XMM11_REG,
21533 XMM12_REG, XMM13_REG, XMM14_REG,
21534 XMM15_REG, SI_REG, DI_REG
21536 rtx vec[ARRAY_SIZE (clobbered_registers) + 3];
21537 rtx use = NULL, call;
21538 unsigned int vec_len;
21540 if (pop == const0_rtx)
21542 gcc_assert (!TARGET_64BIT || !pop);
21544 if (TARGET_MACHO && !TARGET_64BIT)
21547 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
21548 fnaddr = machopic_indirect_call_target (fnaddr);
21553 /* Static functions and indirect calls don't need the pic register. */
21554 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
21555 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
21556 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
21557 use_reg (&use, pic_offset_table_rtx);
21560 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
21562 rtx al = gen_rtx_REG (QImode, AX_REG);
21563 emit_move_insn (al, callarg2);
21564 use_reg (&use, al);
21567 if (ix86_cmodel == CM_LARGE_PIC
21569 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
21570 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
21571 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
21573 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
21574 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
21576 fnaddr = XEXP (fnaddr, 0);
21577 if (GET_MODE (fnaddr) != Pmode)
21578 fnaddr = convert_to_mode (Pmode, fnaddr, 1);
21579 fnaddr = gen_rtx_MEM (QImode, copy_to_mode_reg (Pmode, fnaddr));
21583 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
21585 call = gen_rtx_SET (VOIDmode, retval, call);
21586 vec[vec_len++] = call;
21590 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
21591 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
21592 vec[vec_len++] = pop;
21595 if (TARGET_64BIT_MS_ABI
21596 && (!callarg2 || INTVAL (callarg2) != -2))
21600 vec[vec_len++] = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
21601 UNSPEC_MS_TO_SYSV_CALL);
21603 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
21605 = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
21607 gen_rtx_REG (SSE_REGNO_P (clobbered_registers[i])
21609 clobbered_registers[i]));
21612 /* Add UNSPEC_CALL_NEEDS_VZEROUPPER decoration. */
21613 if (TARGET_VZEROUPPER)
21616 if (cfun->machine->callee_pass_avx256_p)
21618 if (cfun->machine->callee_return_avx256_p)
21619 avx256 = callee_return_pass_avx256;
21621 avx256 = callee_pass_avx256;
21623 else if (cfun->machine->callee_return_avx256_p)
21624 avx256 = callee_return_avx256;
21626 avx256 = call_no_avx256;
21628 if (reload_completed)
21629 emit_insn (gen_avx_vzeroupper (GEN_INT (avx256)));
21631 vec[vec_len++] = gen_rtx_UNSPEC (VOIDmode,
21632 gen_rtvec (1, GEN_INT (avx256)),
21633 UNSPEC_CALL_NEEDS_VZEROUPPER);
21637 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (vec_len, vec));
21638 call = emit_call_insn (call);
21640 CALL_INSN_FUNCTION_USAGE (call) = use;
21646 ix86_split_call_vzeroupper (rtx insn, rtx vzeroupper)
21648 rtx pat = PATTERN (insn);
21649 rtvec vec = XVEC (pat, 0);
21650 int len = GET_NUM_ELEM (vec) - 1;
21652 /* Strip off the last entry of the parallel. */
21653 gcc_assert (GET_CODE (RTVEC_ELT (vec, len)) == UNSPEC);
21654 gcc_assert (XINT (RTVEC_ELT (vec, len), 1) == UNSPEC_CALL_NEEDS_VZEROUPPER);
21656 pat = RTVEC_ELT (vec, 0);
21658 pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (len, &RTVEC_ELT (vec, 0)));
21660 emit_insn (gen_avx_vzeroupper (vzeroupper));
21661 emit_call_insn (pat);
21664 /* Output the assembly for a call instruction. */
21667 ix86_output_call_insn (rtx insn, rtx call_op)
21669 bool direct_p = constant_call_address_operand (call_op, Pmode);
21670 bool seh_nop_p = false;
21673 if (SIBLING_CALL_P (insn))
21677 /* SEH epilogue detection requires the indirect branch case
21678 to include REX.W. */
21679 else if (TARGET_SEH)
21680 xasm = "rex.W jmp %A0";
21684 output_asm_insn (xasm, &call_op);
21688 /* SEH unwinding can require an extra nop to be emitted in several
21689 circumstances. Determine if we have one of those. */
21694 for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
21696 /* If we get to another real insn, we don't need the nop. */
21700 /* If we get to the epilogue note, prevent a catch region from
21701 being adjacent to the standard epilogue sequence. If non-
21702 call-exceptions, we'll have done this during epilogue emission. */
21703 if (NOTE_P (i) && NOTE_KIND (i) == NOTE_INSN_EPILOGUE_BEG
21704 && !flag_non_call_exceptions
21705 && !can_throw_internal (insn))
21712 /* If we didn't find a real insn following the call, prevent the
21713 unwinder from looking into the next function. */
21719 xasm = "call\t%P0";
21721 xasm = "call\t%A0";
21723 output_asm_insn (xasm, &call_op);
21731 /* Clear stack slot assignments remembered from previous functions.
21732 This is called from INIT_EXPANDERS once before RTL is emitted for each function. */
21735 static struct machine_function *
21736 ix86_init_machine_status (void)
21738 struct machine_function *f;
21740 f = ggc_alloc_cleared_machine_function ();
21741 f->use_fast_prologue_epilogue_nregs = -1;
21742 f->tls_descriptor_call_expanded_p = 0;
21743 f->call_abi = ix86_abi;
21748 /* Return a MEM corresponding to a stack slot with mode MODE.
21749 Allocate a new slot if necessary.
21751 The RTL for a function can have several slots available: N is
21752 which slot to use. */
21755 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
21757 struct stack_local_entry *s;
21759 gcc_assert (n < MAX_386_STACK_LOCALS);
21761 /* Virtual slot is valid only before vregs are instantiated. */
21762 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
21764 for (s = ix86_stack_locals; s; s = s->next)
21765 if (s->mode == mode && s->n == n)
21766 return copy_rtx (s->rtl);
21768 s = ggc_alloc_stack_local_entry ();
21771 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
21773 s->next = ix86_stack_locals;
21774 ix86_stack_locals = s;
21778 /* Calculate the length of the memory address in the instruction
21779 encoding. Does not include the one-byte modrm, opcode, or prefix. */
21782 memory_address_length (rtx addr)
21784 struct ix86_address parts;
21785 rtx base, index, disp;
21789 if (GET_CODE (addr) == PRE_DEC
21790 || GET_CODE (addr) == POST_INC
21791 || GET_CODE (addr) == PRE_MODIFY
21792 || GET_CODE (addr) == POST_MODIFY)
21795 ok = ix86_decompose_address (addr, &parts);
21798 if (parts.base && GET_CODE (parts.base) == SUBREG)
21799 parts.base = SUBREG_REG (parts.base);
21800 if (parts.index && GET_CODE (parts.index) == SUBREG)
21801 parts.index = SUBREG_REG (parts.index);
21804 index = parts.index;
21809 /* Rule of thumb: - esp as the base always wants an index,
21810 - ebp as the base always wants a displacement,
21811 - r12 as the base always wants an index,
21812 - r13 as the base always wants a displacement. */
21814 /* Register Indirect. */
21815 if (base && !index && !disp)
21817 /* esp (for its index) and ebp (for its displacement) need
21818 the two-byte modrm form. Similarly for r12 and r13 in 64-bit mode. */
21821 && (addr == arg_pointer_rtx
21822 || addr == frame_pointer_rtx
21823 || REGNO (addr) == SP_REG
21824 || REGNO (addr) == BP_REG
21825 || REGNO (addr) == R12_REG
21826 || REGNO (addr) == R13_REG))
21830 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
21831 is not disp32, but disp32(%rip), so for disp32
21832 SIB byte is needed, unless print_operand_address
21833 optimizes it into disp32(%rip) or (%rip) is implied by UNSPEC. */
21835 else if (disp && !base && !index)
21842 if (GET_CODE (disp) == CONST)
21843 symbol = XEXP (disp, 0);
21844 if (GET_CODE (symbol) == PLUS
21845 && CONST_INT_P (XEXP (symbol, 1)))
21846 symbol = XEXP (symbol, 0);
21848 if (GET_CODE (symbol) != LABEL_REF
21849 && (GET_CODE (symbol) != SYMBOL_REF
21850 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
21851 && (GET_CODE (symbol) != UNSPEC
21852 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
21853 && XINT (symbol, 1) != UNSPEC_PCREL
21854 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
21861 /* Find the length of the displacement constant. */
21864 if (base && satisfies_constraint_K (disp))
21869 /* ebp always wants a displacement. Similarly r13. */
21870 else if (base && REG_P (base)
21871 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
21874 /* An index requires the two-byte modrm form.... */
21876 /* ...like esp (or r12), which always wants an index. */
21877 || base == arg_pointer_rtx
21878 || base == frame_pointer_rtx
21879 || (base && REG_P (base)
21880 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
21897 /* Compute default value for "length_immediate" attribute. When SHORTFORM
21898 is set, expect that the insn has an 8-bit immediate alternative. */
21900 ix86_attr_length_immediate_default (rtx insn, bool shortform)
21904 extract_insn_cached (insn);
21905 for (i = recog_data.n_operands - 1; i >= 0; --i)
21906 if (CONSTANT_P (recog_data.operand[i]))
21908 enum attr_mode mode = get_attr_mode (insn);
21911 if (shortform && CONST_INT_P (recog_data.operand[i]))
21913 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
21920 ival = trunc_int_for_mode (ival, HImode);
21923 ival = trunc_int_for_mode (ival, SImode);
21928 if (IN_RANGE (ival, -128, 127))
21945 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
21950 fatal_insn ("unknown insn mode", insn);
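/* For illustration: with SHORTFORM set, `add $100, %eax' fits the
   sign-extended imm8 alternative and gets length 1, while
   `add $1000, %eax' falls through to the full SImode immediate and
   gets length 4.  */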
21955 /* Compute default value for "length_address" attribute. */
21957 ix86_attr_length_address_default (rtx insn)
21961 if (get_attr_type (insn) == TYPE_LEA)
21963 rtx set = PATTERN (insn), addr;
21965 if (GET_CODE (set) == PARALLEL)
21966 set = XVECEXP (set, 0, 0);
21968 gcc_assert (GET_CODE (set) == SET);
21970 addr = SET_SRC (set);
21971 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
21973 if (GET_CODE (addr) == ZERO_EXTEND)
21974 addr = XEXP (addr, 0);
21975 if (GET_CODE (addr) == SUBREG)
21976 addr = SUBREG_REG (addr);
21979 return memory_address_length (addr);
21982 extract_insn_cached (insn);
21983 for (i = recog_data.n_operands - 1; i >= 0; --i)
21984 if (MEM_P (recog_data.operand[i]))
21986 constrain_operands_cached (reload_completed);
21987 if (which_alternative != -1)
21989 const char *constraints = recog_data.constraints[i];
21990 int alt = which_alternative;
21992 while (*constraints == '=' || *constraints == '+')
21995 while (*constraints++ != ',')
21997 /* Skip ignored operands. */
21998 if (*constraints == 'X')
22001 return memory_address_length (XEXP (recog_data.operand[i], 0));
22006 /* Compute default value for "length_vex" attribute. It includes
22007 2 or 3 byte VEX prefix and 1 opcode byte. */
22010 ix86_attr_length_vex_default (rtx insn, bool has_0f_opcode, bool has_vex_w)
22014 /* Only the 0f opcode can use the 2-byte VEX prefix, and the VEX W bit needs the
22015 3-byte VEX prefix. */
22016 if (!has_0f_opcode || has_vex_w)
22019 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
22023 extract_insn_cached (insn);
22025 for (i = recog_data.n_operands - 1; i >= 0; --i)
22026 if (REG_P (recog_data.operand[i]))
22028 /* REX.W bit uses 3 byte VEX prefix. */
22029 if (GET_MODE (recog_data.operand[i]) == DImode
22030 && GENERAL_REG_P (recog_data.operand[i]))
22035 /* REX.X or REX.B bits use 3 byte VEX prefix. */
22036 if (MEM_P (recog_data.operand[i])
22037 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
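/* E.g., `vaddps %xmm2, %xmm1, %xmm0' fits the 2-byte VEX prefix, but
   `vaddps (%r8), %xmm1, %xmm0' needs REX.B for the base register and
   therefore the 3-byte form, as does any instruction needing REX.W
   for a DImode general register.  */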
22044 /* Return the maximum number of instructions a cpu can issue. */
22047 ix86_issue_rate (void)
22051 case PROCESSOR_PENTIUM:
22052 case PROCESSOR_ATOM:
22056 case PROCESSOR_PENTIUMPRO:
22057 case PROCESSOR_PENTIUM4:
22058 case PROCESSOR_CORE2_32:
22059 case PROCESSOR_CORE2_64:
22060 case PROCESSOR_COREI7_32:
22061 case PROCESSOR_COREI7_64:
22062 case PROCESSOR_ATHLON:
22064 case PROCESSOR_AMDFAM10:
22065 case PROCESSOR_NOCONA:
22066 case PROCESSOR_GENERIC32:
22067 case PROCESSOR_GENERIC64:
22068 case PROCESSOR_BDVER1:
22069 case PROCESSOR_BDVER2:
22070 case PROCESSOR_BTVER1:
22078 /* A subroutine of ix86_adjust_cost -- return TRUE iff INSN reads the flags set
22079 by DEP_INSN and nothing else that DEP_INSN sets. */
22082 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
22086 /* Simplify the test for uninteresting insns. */
22087 if (insn_type != TYPE_SETCC
22088 && insn_type != TYPE_ICMOV
22089 && insn_type != TYPE_FCMOV
22090 && insn_type != TYPE_IBR)
22093 if ((set = single_set (dep_insn)) != 0)
22095 set = SET_DEST (set);
22098 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
22099 && XVECLEN (PATTERN (dep_insn), 0) == 2
22100 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
22101 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
22103 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
22104       set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
22109 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
22112 /* This test is true if the dependent insn reads the flags but
22113 not any other potentially set register. */
22114 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
22117 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
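/* Illustrative example: for the pair "cmpl %eax, %ebx" / "je .L1", the
   jump reads only the flags written by the compare, so this function
   returns true and the two insns can be costed as a compare/branch
   pair below.  */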
22123 /* Return true iff USE_INSN has a memory address with operands set by
22127 ix86_agi_dependent (rtx set_insn, rtx use_insn)
22130 extract_insn_cached (use_insn);
22131 for (i = recog_data.n_operands - 1; i >= 0; --i)
22132 if (MEM_P (recog_data.operand[i]))
22134 rtx addr = XEXP (recog_data.operand[i], 0);
22135 return modified_in_p (addr, set_insn) != 0;
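/* Illustrative example: for "addl $4, %ebx" followed by
   "movl (%ebx), %eax", the load's address is modified by the first
   insn, so this function returns true and ix86_adjust_cost below adds
   the Pentium address-generation-interlock cycle.  */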
22141 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22143 enum attr_type insn_type, dep_insn_type;
22144 enum attr_memory memory;
22146 int dep_insn_code_number;
22148 /* Anti and output dependencies have zero cost on all CPUs. */
22149 if (REG_NOTE_KIND (link) != 0)
22152 dep_insn_code_number = recog_memoized (dep_insn);
22154 /* If we can't recognize the insns, we can't really do anything. */
22155 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
22158 insn_type = get_attr_type (insn);
22159 dep_insn_type = get_attr_type (dep_insn);
22163 case PROCESSOR_PENTIUM:
22164 /* Address Generation Interlock adds a cycle of latency. */
22165 if (insn_type == TYPE_LEA)
22167 rtx addr = PATTERN (insn);
22169 if (GET_CODE (addr) == PARALLEL)
22170 addr = XVECEXP (addr, 0, 0);
22172 gcc_assert (GET_CODE (addr) == SET);
22174 addr = SET_SRC (addr);
22175 if (modified_in_p (addr, dep_insn))
22178 else if (ix86_agi_dependent (dep_insn, insn))
22181 /* ??? Compares pair with jump/setcc. */
22182 if (ix86_flags_dependent (insn, dep_insn, insn_type))
22185       /* Floating point stores require the value to be ready one cycle earlier.  */
22186 if (insn_type == TYPE_FMOV
22187 && get_attr_memory (insn) == MEMORY_STORE
22188 && !ix86_agi_dependent (dep_insn, insn))
22192 case PROCESSOR_PENTIUMPRO:
22193 memory = get_attr_memory (insn);
22195 /* INT->FP conversion is expensive. */
22196 if (get_attr_fp_int_src (dep_insn))
22199 /* There is one cycle extra latency between an FP op and a store. */
22200 if (insn_type == TYPE_FMOV
22201 && (set = single_set (dep_insn)) != NULL_RTX
22202 && (set2 = single_set (insn)) != NULL_RTX
22203 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
22204 && MEM_P (SET_DEST (set2)))
22207       /* Show the ability of the reorder buffer to hide the latency of a
22208	   load by executing it in parallel with the previous instruction,
22209	   when the previous instruction is not needed to compute the address.  */
22210 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22211 && !ix86_agi_dependent (dep_insn, insn))
22213	  /* Claim moves to take one cycle, as the core can issue one load
22214	     at a time and the next load can start a cycle later.  */
22215 if (dep_insn_type == TYPE_IMOV
22216 || dep_insn_type == TYPE_FMOV)
22224 memory = get_attr_memory (insn);
22226       /* The esp dependency is resolved before the instruction is really
22227          finished.  */
22228 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
22229 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
22232 /* INT->FP conversion is expensive. */
22233 if (get_attr_fp_int_src (dep_insn))
22236       /* Show the ability of the reorder buffer to hide the latency of a
22237	   load by executing it in parallel with the previous instruction,
22238	   when the previous instruction is not needed to compute the address.  */
22239 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22240 && !ix86_agi_dependent (dep_insn, insn))
22242	  /* Claim moves to take one cycle, as the core can issue one load
22243	     at a time and the next load can start a cycle later.  */
22244 if (dep_insn_type == TYPE_IMOV
22245 || dep_insn_type == TYPE_FMOV)
22254 case PROCESSOR_ATHLON:
22256 case PROCESSOR_AMDFAM10:
22257 case PROCESSOR_BDVER1:
22258 case PROCESSOR_BDVER2:
22259 case PROCESSOR_BTVER1:
22260 case PROCESSOR_ATOM:
22261 case PROCESSOR_GENERIC32:
22262 case PROCESSOR_GENERIC64:
22263 memory = get_attr_memory (insn);
22265       /* Show the ability of the reorder buffer to hide the latency of a
22266	   load by executing it in parallel with the previous instruction,
22267	   when the previous instruction is not needed to compute the address.  */
22268 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
22269 && !ix86_agi_dependent (dep_insn, insn))
22271 enum attr_unit unit = get_attr_unit (insn);
22274	  /* Because of the difference between the length of the integer and
22275	     floating-point unit pipeline preparation stages, the memory operands
22276	     for floating point are cheaper.
22278	     ??? For Athlon the difference is most probably 2.  */
22279 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
22282 loadcost = TARGET_ATHLON ? 2 : 0;
22284 if (cost >= loadcost)
22297 /* How many alternative schedules to try.  This should be as wide as the
22298    scheduling freedom in the DFA, but no wider.  Making this value too
22299    large results in extra work for the scheduler.  */
22302 ia32_multipass_dfa_lookahead (void)
22306 case PROCESSOR_PENTIUM:
22309 case PROCESSOR_PENTIUMPRO:
22313 case PROCESSOR_CORE2_32:
22314 case PROCESSOR_CORE2_64:
22315 case PROCESSOR_COREI7_32:
22316 case PROCESSOR_COREI7_64:
22317       /* Generally, we want haifa-sched:max_issue() to look ahead as far
22318	 as the number of instructions that can be executed in one cycle,
22319	 i.e., issue_rate.  I wonder why tuning for many CPUs does not do this.  */
22320 return ix86_issue_rate ();
22329 /* Model decoder of Core 2/i7.
22330 Below hooks for multipass scheduling (see haifa-sched.c:max_issue)
22331 track the instruction fetch block boundaries and make sure that long
22332 (9+ bytes) instructions are assigned to D0. */
22334 /* Maximum length of an insn that can be handled by
22335 a secondary decoder unit. '8' for Core 2/i7. */
22336 static int core2i7_secondary_decoder_max_insn_size;
22338 /* Ifetch block size, i.e., number of bytes decoder reads per cycle.
22339 '16' for Core 2/i7. */
22340 static int core2i7_ifetch_block_size;
22342 /* Maximum number of instructions decoder can handle per cycle.
22343 '6' for Core 2/i7. */
22344 static int core2i7_ifetch_block_max_insns;
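/* Illustrative arithmetic for these parameters: six 2-byte insns
   (12 bytes) exhaust the 6-insn decode width before the 16-byte fetch
   limit, while two 8-byte insns exhaust the 16-byte fetch limit first;
   either way the remaining ready insns are filtered out until the next
   cycle.  */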
22346 typedef struct ix86_first_cycle_multipass_data_ *
22347 ix86_first_cycle_multipass_data_t;
22348 typedef const struct ix86_first_cycle_multipass_data_ *
22349 const_ix86_first_cycle_multipass_data_t;
22351 /* A variable to store target state across calls to max_issue within
22352    one cycle.  */
22353 static struct ix86_first_cycle_multipass_data_ _ix86_first_cycle_multipass_data,
22354 *ix86_first_cycle_multipass_data = &_ix86_first_cycle_multipass_data;
22356 /* Initialize DATA. */
22358 core2i7_first_cycle_multipass_init (void *_data)
22360 ix86_first_cycle_multipass_data_t data
22361 = (ix86_first_cycle_multipass_data_t) _data;
22363 data->ifetch_block_len = 0;
22364 data->ifetch_block_n_insns = 0;
22365 data->ready_try_change = NULL;
22366 data->ready_try_change_size = 0;
22369 /* Advancing the cycle; reset ifetch block counts. */
22371 core2i7_dfa_post_advance_cycle (void)
22373 ix86_first_cycle_multipass_data_t data = ix86_first_cycle_multipass_data;
22375 gcc_assert (data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22377 data->ifetch_block_len = 0;
22378 data->ifetch_block_n_insns = 0;
22381 static int min_insn_size (rtx);
22383 /* Filter out insns from ready_try that the core will not be able to issue
22384    on the current cycle due to decoder restrictions.  */
22386 core2i7_first_cycle_multipass_filter_ready_try
22387 (const_ix86_first_cycle_multipass_data_t data,
22388 char *ready_try, int n_ready, bool first_cycle_insn_p)
22395 if (ready_try[n_ready])
22398 insn = get_ready_element (n_ready);
22399 insn_size = min_insn_size (insn);
22401       if (/* If this insn is too long for a secondary decoder ... */
22402 (!first_cycle_insn_p
22403 && insn_size > core2i7_secondary_decoder_max_insn_size)
22404 /* ... or it would not fit into the ifetch block ... */
22405 || data->ifetch_block_len + insn_size > core2i7_ifetch_block_size
22406 /* ... or the decoder is full already ... */
22407 || data->ifetch_block_n_insns + 1 > core2i7_ifetch_block_max_insns)
22408 /* ... mask the insn out. */
22410 ready_try[n_ready] = 1;
22412 if (data->ready_try_change)
22413 SET_BIT (data->ready_try_change, n_ready);
22418 /* Prepare for a new round of multipass lookahead scheduling. */
22420 core2i7_first_cycle_multipass_begin (void *_data, char *ready_try, int n_ready,
22421 bool first_cycle_insn_p)
22423 ix86_first_cycle_multipass_data_t data
22424 = (ix86_first_cycle_multipass_data_t) _data;
22425 const_ix86_first_cycle_multipass_data_t prev_data
22426 = ix86_first_cycle_multipass_data;
22428 /* Restore the state from the end of the previous round. */
22429 data->ifetch_block_len = prev_data->ifetch_block_len;
22430 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns;
22432 /* Filter instructions that cannot be issued on current cycle due to
22433 decoder restrictions. */
22434 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
22435 first_cycle_insn_p);
22438 /* INSN is being issued in the current solution.  Account for its impact on
22439    the decoder model.  */
22441 core2i7_first_cycle_multipass_issue (void *_data, char *ready_try, int n_ready,
22442 rtx insn, const void *_prev_data)
22444 ix86_first_cycle_multipass_data_t data
22445 = (ix86_first_cycle_multipass_data_t) _data;
22446 const_ix86_first_cycle_multipass_data_t prev_data
22447 = (const_ix86_first_cycle_multipass_data_t) _prev_data;
22449 int insn_size = min_insn_size (insn);
22451 data->ifetch_block_len = prev_data->ifetch_block_len + insn_size;
22452 data->ifetch_block_n_insns = prev_data->ifetch_block_n_insns + 1;
22453 gcc_assert (data->ifetch_block_len <= core2i7_ifetch_block_size
22454 && data->ifetch_block_n_insns <= core2i7_ifetch_block_max_insns);
22456 /* Allocate or resize the bitmap for storing INSN's effect on ready_try. */
22457 if (!data->ready_try_change)
22459 data->ready_try_change = sbitmap_alloc (n_ready);
22460 data->ready_try_change_size = n_ready;
22462 else if (data->ready_try_change_size < n_ready)
22464 data->ready_try_change = sbitmap_resize (data->ready_try_change,
22466 data->ready_try_change_size = n_ready;
22468 sbitmap_zero (data->ready_try_change);
22470   /* Filter out insns from ready_try that the core will not be able to issue
22471      on the current cycle due to decoder restrictions.  */
22472 core2i7_first_cycle_multipass_filter_ready_try (data, ready_try, n_ready,
22476 /* Revert the effect on ready_try. */
22478 core2i7_first_cycle_multipass_backtrack (const void *_data,
22480 int n_ready ATTRIBUTE_UNUSED)
22482 const_ix86_first_cycle_multipass_data_t data
22483 = (const_ix86_first_cycle_multipass_data_t) _data;
22484 unsigned int i = 0;
22485 sbitmap_iterator sbi;
22487 gcc_assert (sbitmap_last_set_bit (data->ready_try_change) < n_ready);
22488 EXECUTE_IF_SET_IN_SBITMAP (data->ready_try_change, 0, i, sbi)
22494 /* Save the result of multipass lookahead scheduling for the next round. */
22496 core2i7_first_cycle_multipass_end (const void *_data)
22498 const_ix86_first_cycle_multipass_data_t data
22499 = (const_ix86_first_cycle_multipass_data_t) _data;
22500 ix86_first_cycle_multipass_data_t next_data
22501 = ix86_first_cycle_multipass_data;
22505 next_data->ifetch_block_len = data->ifetch_block_len;
22506 next_data->ifetch_block_n_insns = data->ifetch_block_n_insns;
22510 /* Deallocate target data. */
22512 core2i7_first_cycle_multipass_fini (void *_data)
22514 ix86_first_cycle_multipass_data_t data
22515 = (ix86_first_cycle_multipass_data_t) _data;
22517 if (data->ready_try_change)
22519 sbitmap_free (data->ready_try_change);
22520 data->ready_try_change = NULL;
22521 data->ready_try_change_size = 0;
22525 /* Prepare for scheduling pass. */
22527 ix86_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
22528 int verbose ATTRIBUTE_UNUSED,
22529 int max_uid ATTRIBUTE_UNUSED)
22531 /* Install scheduling hooks for current CPU. Some of these hooks are used
22532 in time-critical parts of the scheduler, so we only set them up when
22533 they are actually used. */
22536 case PROCESSOR_CORE2_32:
22537 case PROCESSOR_CORE2_64:
22538 case PROCESSOR_COREI7_32:
22539 case PROCESSOR_COREI7_64:
22540 targetm.sched.dfa_post_advance_cycle
22541 = core2i7_dfa_post_advance_cycle;
22542 targetm.sched.first_cycle_multipass_init
22543 = core2i7_first_cycle_multipass_init;
22544 targetm.sched.first_cycle_multipass_begin
22545 = core2i7_first_cycle_multipass_begin;
22546 targetm.sched.first_cycle_multipass_issue
22547 = core2i7_first_cycle_multipass_issue;
22548 targetm.sched.first_cycle_multipass_backtrack
22549 = core2i7_first_cycle_multipass_backtrack;
22550 targetm.sched.first_cycle_multipass_end
22551 = core2i7_first_cycle_multipass_end;
22552 targetm.sched.first_cycle_multipass_fini
22553 = core2i7_first_cycle_multipass_fini;
22555 /* Set decoder parameters. */
22556 core2i7_secondary_decoder_max_insn_size = 8;
22557 core2i7_ifetch_block_size = 16;
22558 core2i7_ifetch_block_max_insns = 6;
22562 targetm.sched.dfa_post_advance_cycle = NULL;
22563 targetm.sched.first_cycle_multipass_init = NULL;
22564 targetm.sched.first_cycle_multipass_begin = NULL;
22565 targetm.sched.first_cycle_multipass_issue = NULL;
22566 targetm.sched.first_cycle_multipass_backtrack = NULL;
22567 targetm.sched.first_cycle_multipass_end = NULL;
22568 targetm.sched.first_cycle_multipass_fini = NULL;
22574 /* Compute the alignment given to a constant that is being placed in memory.
22575    EXP is the constant and ALIGN is the alignment that the object would
22576    ordinarily have.
22577    The value of this function is used instead of that alignment to align
22578    the object.  */
22581 ix86_constant_alignment (tree exp, int align)
22583 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
22584 || TREE_CODE (exp) == INTEGER_CST)
22586 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
22588 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
22591 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
22592 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
22593 return BITS_PER_WORD;
22598 /* Compute the alignment for a static variable.
22599 TYPE is the data type, and ALIGN is the alignment that
22600 the object would ordinarily have. The value of this function is used
22601 instead of that alignment to align the object. */
22604 ix86_data_alignment (tree type, int align)
22606 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
22608 if (AGGREGATE_TYPE_P (type)
22609 && TYPE_SIZE (type)
22610 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
22611 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
22612 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
22613 && align < max_align)
22616   /* The x86-64 ABI requires arrays of 16 bytes or larger to be aligned
22617      to a 16-byte boundary.  */
22620 if (AGGREGATE_TYPE_P (type)
22621 && TYPE_SIZE (type)
22622 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
22623 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
22624 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
22628 if (TREE_CODE (type) == ARRAY_TYPE)
22630 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
22632 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
22635 else if (TREE_CODE (type) == COMPLEX_TYPE)
22638 if (TYPE_MODE (type) == DCmode && align < 64)
22640 if ((TYPE_MODE (type) == XCmode
22641 || TYPE_MODE (type) == TCmode) && align < 128)
22644 else if ((TREE_CODE (type) == RECORD_TYPE
22645 || TREE_CODE (type) == UNION_TYPE
22646 || TREE_CODE (type) == QUAL_UNION_TYPE)
22647 && TYPE_FIELDS (type))
22649 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
22651 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
22654 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
22655 || TREE_CODE (type) == INTEGER_TYPE)
22657 if (TYPE_MODE (type) == DFmode && align < 64)
22659 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
22666 /* Compute the alignment for a local variable or a stack slot. EXP is
22667 the data type or decl itself, MODE is the widest mode available and
22668 ALIGN is the alignment that the object would ordinarily have. The
22669    value of this macro is used instead of that alignment to align the
22670    object.  */
22673 ix86_local_alignment (tree exp, enum machine_mode mode,
22674 unsigned int align)
22678 if (exp && DECL_P (exp))
22680 type = TREE_TYPE (exp);
22689 /* Don't do dynamic stack realignment for long long objects with
22690 -mpreferred-stack-boundary=2. */
22693 && ix86_preferred_stack_boundary < 64
22694 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
22695 && (!type || !TYPE_USER_ALIGN (type))
22696 && (!decl || !DECL_USER_ALIGN (decl)))
22699 /* If TYPE is NULL, we are allocating a stack slot for caller-save
22700      register in MODE.  We will return the largest alignment of XF
22701      and DF.  */
22704 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
22705 align = GET_MODE_ALIGNMENT (DFmode);
22709   /* The x86-64 ABI requires arrays of 16 bytes or larger to be aligned
22710      to a 16-byte boundary.  The exact wording is:

22712        An array uses the same alignment as its elements, except that a local or
22713        global array variable of length at least 16 bytes or
22714        a C99 variable-length array variable always has alignment of at least 16 bytes.

22716      This was added to allow use of aligned SSE instructions on arrays.  The
22717      rule is meant for static storage (where the compiler cannot do the
22718      analysis by itself).  We follow it for automatic variables only when
22719      convenient.  We fully control everything in the function being compiled,
22720      and functions from other units cannot rely on the alignment.

22722      Exclude the va_list type.  It is the common case of a local array where
22723      we cannot benefit from the alignment.  */
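/* Illustrative example: a local "double buf[4]" occupies 32 bytes,
   meeting the 16-byte threshold, so its alignment is raised to 128
   bits and aligned SSE accesses (movaps/movapd) become safe; a local
   va_list is deliberately excluded from this bump.  */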
22724 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
22727 if (AGGREGATE_TYPE_P (type)
22728 && (va_list_type_node == NULL_TREE
22729 || (TYPE_MAIN_VARIANT (type)
22730 != TYPE_MAIN_VARIANT (va_list_type_node)))
22731 && TYPE_SIZE (type)
22732 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
22733 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
22734 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
22737 if (TREE_CODE (type) == ARRAY_TYPE)
22739 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
22741 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
22744 else if (TREE_CODE (type) == COMPLEX_TYPE)
22746 if (TYPE_MODE (type) == DCmode && align < 64)
22748 if ((TYPE_MODE (type) == XCmode
22749 || TYPE_MODE (type) == TCmode) && align < 128)
22752 else if ((TREE_CODE (type) == RECORD_TYPE
22753 || TREE_CODE (type) == UNION_TYPE
22754 || TREE_CODE (type) == QUAL_UNION_TYPE)
22755 && TYPE_FIELDS (type))
22757 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
22759 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
22762 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
22763 || TREE_CODE (type) == INTEGER_TYPE)
22766 if (TYPE_MODE (type) == DFmode && align < 64)
22768 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
22774 /* Compute the minimum required alignment for dynamic stack realignment
22775 purposes for a local variable, parameter or a stack slot. EXP is
22776 the data type or decl itself, MODE is its mode and ALIGN is the
22777 alignment that the object would ordinarily have. */
22780 ix86_minimum_alignment (tree exp, enum machine_mode mode,
22781 unsigned int align)
22785 if (exp && DECL_P (exp))
22787 type = TREE_TYPE (exp);
22796 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
22799 /* Don't do dynamic stack realignment for long long objects with
22800 -mpreferred-stack-boundary=2. */
22801 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
22802 && (!type || !TYPE_USER_ALIGN (type))
22803 && (!decl || !DECL_USER_ALIGN (decl)))
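/* Illustrative example: with -m32 -mpreferred-stack-boundary=2, a plain
   "long long" local would otherwise demand dynamic realignment to reach
   its natural 64-bit alignment; unless the user explicitly requested
   the alignment, it is relaxed here so the frame stays 32-bit
   aligned.  */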
22809 /* Find a location for the static chain incoming to a nested function.
22810 This is a register, unless all free registers are used by arguments. */
22813 ix86_static_chain (const_tree fndecl, bool incoming_p)
22817 if (!DECL_STATIC_CHAIN (fndecl))
22822 /* We always use R10 in 64-bit mode. */
22830 /* By default in 32-bit mode we use ECX to pass the static chain. */
22833 fntype = TREE_TYPE (fndecl);
22834 ccvt = ix86_get_callcvt (fntype);
22835 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) != 0)
22837 /* Fastcall functions use ecx/edx for arguments, which leaves
22838 us with EAX for the static chain.
22839 Thiscall functions use ecx for arguments, which also
22840 leaves us with EAX for the static chain. */
22843 else if (ix86_function_regparm (fntype, fndecl) == 3)
22845 /* For regparm 3, we have no free call-clobbered registers in
22846 which to store the static chain. In order to implement this,
22847 we have the trampoline push the static chain to the stack.
22848 However, we can't push a value below the return address when
22849 we call the nested function directly, so we have to use an
22850 alternate entry point. For this we use ESI, and have the
22851 alternate entry point push ESI, so that things appear the
22852 same once we're executing the nested function. */
22855 if (fndecl == current_function_decl)
22856 ix86_static_chain_on_stack = true;
22857 return gen_frame_mem (SImode,
22858 plus_constant (arg_pointer_rtx, -8));
22864 return gen_rtx_REG (Pmode, regno);
22867 /* Emit RTL insns to initialize the variable parts of a trampoline.
22868 FNDECL is the decl of the target address; M_TRAMP is a MEM for
22869 the trampoline, and CHAIN_VALUE is an RTX for the static chain
22870 to be passed to the target function. */
22873 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
22879 fnaddr = XEXP (DECL_RTL (fndecl), 0);
22885       /* Load the function address into r11.  Try to load the address using
22886	  the shorter movl instead of movabs.  We may want to support
22887	  movq for kernel mode, but the kernel does not use trampolines at
22888	  the moment.  */
22889 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
22891 fnaddr = copy_to_mode_reg (DImode, fnaddr);
22893 mem = adjust_address (m_tramp, HImode, offset);
22894 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
22896 mem = adjust_address (m_tramp, SImode, offset + 2);
22897 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
22902 mem = adjust_address (m_tramp, HImode, offset);
22903 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
22905 mem = adjust_address (m_tramp, DImode, offset + 2);
22906 emit_move_insn (mem, fnaddr);
22910       /* Load the static chain into r10 using movabs.  Use the
22911	  shorter movl instead of movabs for x32.  */
22923 mem = adjust_address (m_tramp, HImode, offset);
22924 emit_move_insn (mem, gen_int_mode (opcode, HImode));
22926 mem = adjust_address (m_tramp, ptr_mode, offset + 2);
22927 emit_move_insn (mem, chain_value);
22930 /* Jump to r11; the last (unused) byte is a nop, only there to
22931 pad the write out to a single 32-bit store. */
22932 mem = adjust_address (m_tramp, SImode, offset);
22933 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
22940 /* Depending on the static chain location, either load a register
22941 with a constant, or push the constant to the stack. All of the
22942 instructions are the same size. */
22943 chain = ix86_static_chain (fndecl, true);
22946 switch (REGNO (chain))
22949 opcode = 0xb8; break;
22951 opcode = 0xb9; break;
22953 gcc_unreachable ();
22959 mem = adjust_address (m_tramp, QImode, offset);
22960 emit_move_insn (mem, gen_int_mode (opcode, QImode));
22962 mem = adjust_address (m_tramp, SImode, offset + 1);
22963 emit_move_insn (mem, chain_value);
22966 mem = adjust_address (m_tramp, QImode, offset);
22967 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
22969 mem = adjust_address (m_tramp, SImode, offset + 1);
22971 /* Compute offset from the end of the jmp to the target function.
22972 In the case in which the trampoline stores the static chain on
22973 the stack, we need to skip the first insn which pushes the
22974 (call-saved) register static chain; this push is 1 byte. */
22976 disp = expand_binop (SImode, sub_optab, fnaddr,
22977 plus_constant (XEXP (m_tramp, 0),
22978 offset - (MEM_P (chain) ? 1 : 0)),
22979 NULL_RTX, 1, OPTAB_DIRECT);
22980 emit_move_insn (mem, disp);
22983 gcc_assert (offset <= TRAMPOLINE_SIZE);
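/* Illustrative layout (assuming the 64-bit movabs paths above): the
   trampoline bytes are
       49 bb <8-byte fnaddr>	movabs $fnaddr, %r11
       49 ba <8-byte chain>	movabs $chain, %r10
       49 ff e3 90		jmp *%r11; nop (pads the final 32-bit store)
   for 24 bytes in total.  */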
22985 #ifdef HAVE_ENABLE_EXECUTE_STACK
22986 #ifdef CHECK_EXECUTE_STACK_ENABLED
22987 if (CHECK_EXECUTE_STACK_ENABLED)
22989 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
22990 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
22994 /* The following file contains several enumerations and data structures
22995 built from the definitions in i386-builtin-types.def. */
22997 #include "i386-builtin-types.inc"
22999 /* Table for the ix86 builtin non-function types. */
23000 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
23002 /* Retrieve an element from the above table, building some of
23003 the types lazily. */
23006 ix86_get_builtin_type (enum ix86_builtin_type tcode)
23008 unsigned int index;
23011 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
23013 type = ix86_builtin_type_tab[(int) tcode];
23017 gcc_assert (tcode > IX86_BT_LAST_PRIM);
23018 if (tcode <= IX86_BT_LAST_VECT)
23020 enum machine_mode mode;
23022 index = tcode - IX86_BT_LAST_PRIM - 1;
23023 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
23024 mode = ix86_builtin_type_vect_mode[index];
23026 type = build_vector_type_for_mode (itype, mode);
23032 index = tcode - IX86_BT_LAST_VECT - 1;
23033 if (tcode <= IX86_BT_LAST_PTR)
23034 quals = TYPE_UNQUALIFIED;
23036 quals = TYPE_QUAL_CONST;
23038 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
23039 if (quals != TYPE_UNQUALIFIED)
23040 itype = build_qualified_type (itype, quals);
23042 type = build_pointer_type (itype);
23045 ix86_builtin_type_tab[(int) tcode] = type;
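/* Illustrative example (assuming the IX86_BT_V4SF code generated in
   i386-builtin-types.inc): the first lookup of IX86_BT_V4SF builds
   build_vector_type_for_mode (float_type_node, V4SFmode) and caches it
   in ix86_builtin_type_tab; later lookups return the cached tree.  */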
23049 /* Table for the ix86 builtin function types. */
23050 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
23052 /* Retrieve an element from the above table, building some of
23053 the types lazily. */
23056 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
23060 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
23062 type = ix86_builtin_func_type_tab[(int) tcode];
23066 if (tcode <= IX86_BT_LAST_FUNC)
23068 unsigned start = ix86_builtin_func_start[(int) tcode];
23069 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
23070 tree rtype, atype, args = void_list_node;
23073 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
23074 for (i = after - 1; i > start; --i)
23076 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
23077 args = tree_cons (NULL, atype, args);
23080 type = build_function_type (rtype, args);
23084 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
23085 enum ix86_builtin_func_type icode;
23087 icode = ix86_builtin_func_alias_base[index];
23088 type = ix86_get_builtin_func_type (icode);
23091 ix86_builtin_func_type_tab[(int) tcode] = type;
23096 /* Codes for all the SSE/MMX builtins. */
23099 IX86_BUILTIN_ADDPS,
23100 IX86_BUILTIN_ADDSS,
23101 IX86_BUILTIN_DIVPS,
23102 IX86_BUILTIN_DIVSS,
23103 IX86_BUILTIN_MULPS,
23104 IX86_BUILTIN_MULSS,
23105 IX86_BUILTIN_SUBPS,
23106 IX86_BUILTIN_SUBSS,
23108 IX86_BUILTIN_CMPEQPS,
23109 IX86_BUILTIN_CMPLTPS,
23110 IX86_BUILTIN_CMPLEPS,
23111 IX86_BUILTIN_CMPGTPS,
23112 IX86_BUILTIN_CMPGEPS,
23113 IX86_BUILTIN_CMPNEQPS,
23114 IX86_BUILTIN_CMPNLTPS,
23115 IX86_BUILTIN_CMPNLEPS,
23116 IX86_BUILTIN_CMPNGTPS,
23117 IX86_BUILTIN_CMPNGEPS,
23118 IX86_BUILTIN_CMPORDPS,
23119 IX86_BUILTIN_CMPUNORDPS,
23120 IX86_BUILTIN_CMPEQSS,
23121 IX86_BUILTIN_CMPLTSS,
23122 IX86_BUILTIN_CMPLESS,
23123 IX86_BUILTIN_CMPNEQSS,
23124 IX86_BUILTIN_CMPNLTSS,
23125 IX86_BUILTIN_CMPNLESS,
23126 IX86_BUILTIN_CMPNGTSS,
23127 IX86_BUILTIN_CMPNGESS,
23128 IX86_BUILTIN_CMPORDSS,
23129 IX86_BUILTIN_CMPUNORDSS,
23131 IX86_BUILTIN_COMIEQSS,
23132 IX86_BUILTIN_COMILTSS,
23133 IX86_BUILTIN_COMILESS,
23134 IX86_BUILTIN_COMIGTSS,
23135 IX86_BUILTIN_COMIGESS,
23136 IX86_BUILTIN_COMINEQSS,
23137 IX86_BUILTIN_UCOMIEQSS,
23138 IX86_BUILTIN_UCOMILTSS,
23139 IX86_BUILTIN_UCOMILESS,
23140 IX86_BUILTIN_UCOMIGTSS,
23141 IX86_BUILTIN_UCOMIGESS,
23142 IX86_BUILTIN_UCOMINEQSS,
23144 IX86_BUILTIN_CVTPI2PS,
23145 IX86_BUILTIN_CVTPS2PI,
23146 IX86_BUILTIN_CVTSI2SS,
23147 IX86_BUILTIN_CVTSI642SS,
23148 IX86_BUILTIN_CVTSS2SI,
23149 IX86_BUILTIN_CVTSS2SI64,
23150 IX86_BUILTIN_CVTTPS2PI,
23151 IX86_BUILTIN_CVTTSS2SI,
23152 IX86_BUILTIN_CVTTSS2SI64,
23154 IX86_BUILTIN_MAXPS,
23155 IX86_BUILTIN_MAXSS,
23156 IX86_BUILTIN_MINPS,
23157 IX86_BUILTIN_MINSS,
23159 IX86_BUILTIN_LOADUPS,
23160 IX86_BUILTIN_STOREUPS,
23161 IX86_BUILTIN_MOVSS,
23163 IX86_BUILTIN_MOVHLPS,
23164 IX86_BUILTIN_MOVLHPS,
23165 IX86_BUILTIN_LOADHPS,
23166 IX86_BUILTIN_LOADLPS,
23167 IX86_BUILTIN_STOREHPS,
23168 IX86_BUILTIN_STORELPS,
23170 IX86_BUILTIN_MASKMOVQ,
23171 IX86_BUILTIN_MOVMSKPS,
23172 IX86_BUILTIN_PMOVMSKB,
23174 IX86_BUILTIN_MOVNTPS,
23175 IX86_BUILTIN_MOVNTQ,
23177 IX86_BUILTIN_LOADDQU,
23178 IX86_BUILTIN_STOREDQU,
23180 IX86_BUILTIN_PACKSSWB,
23181 IX86_BUILTIN_PACKSSDW,
23182 IX86_BUILTIN_PACKUSWB,
23184 IX86_BUILTIN_PADDB,
23185 IX86_BUILTIN_PADDW,
23186 IX86_BUILTIN_PADDD,
23187 IX86_BUILTIN_PADDQ,
23188 IX86_BUILTIN_PADDSB,
23189 IX86_BUILTIN_PADDSW,
23190 IX86_BUILTIN_PADDUSB,
23191 IX86_BUILTIN_PADDUSW,
23192 IX86_BUILTIN_PSUBB,
23193 IX86_BUILTIN_PSUBW,
23194 IX86_BUILTIN_PSUBD,
23195 IX86_BUILTIN_PSUBQ,
23196 IX86_BUILTIN_PSUBSB,
23197 IX86_BUILTIN_PSUBSW,
23198 IX86_BUILTIN_PSUBUSB,
23199 IX86_BUILTIN_PSUBUSW,
23202 IX86_BUILTIN_PANDN,
23206 IX86_BUILTIN_PAVGB,
23207 IX86_BUILTIN_PAVGW,
23209 IX86_BUILTIN_PCMPEQB,
23210 IX86_BUILTIN_PCMPEQW,
23211 IX86_BUILTIN_PCMPEQD,
23212 IX86_BUILTIN_PCMPGTB,
23213 IX86_BUILTIN_PCMPGTW,
23214 IX86_BUILTIN_PCMPGTD,
23216 IX86_BUILTIN_PMADDWD,
23218 IX86_BUILTIN_PMAXSW,
23219 IX86_BUILTIN_PMAXUB,
23220 IX86_BUILTIN_PMINSW,
23221 IX86_BUILTIN_PMINUB,
23223 IX86_BUILTIN_PMULHUW,
23224 IX86_BUILTIN_PMULHW,
23225 IX86_BUILTIN_PMULLW,
23227 IX86_BUILTIN_PSADBW,
23228 IX86_BUILTIN_PSHUFW,
23230 IX86_BUILTIN_PSLLW,
23231 IX86_BUILTIN_PSLLD,
23232 IX86_BUILTIN_PSLLQ,
23233 IX86_BUILTIN_PSRAW,
23234 IX86_BUILTIN_PSRAD,
23235 IX86_BUILTIN_PSRLW,
23236 IX86_BUILTIN_PSRLD,
23237 IX86_BUILTIN_PSRLQ,
23238 IX86_BUILTIN_PSLLWI,
23239 IX86_BUILTIN_PSLLDI,
23240 IX86_BUILTIN_PSLLQI,
23241 IX86_BUILTIN_PSRAWI,
23242 IX86_BUILTIN_PSRADI,
23243 IX86_BUILTIN_PSRLWI,
23244 IX86_BUILTIN_PSRLDI,
23245 IX86_BUILTIN_PSRLQI,
23247 IX86_BUILTIN_PUNPCKHBW,
23248 IX86_BUILTIN_PUNPCKHWD,
23249 IX86_BUILTIN_PUNPCKHDQ,
23250 IX86_BUILTIN_PUNPCKLBW,
23251 IX86_BUILTIN_PUNPCKLWD,
23252 IX86_BUILTIN_PUNPCKLDQ,
23254 IX86_BUILTIN_SHUFPS,
23256 IX86_BUILTIN_RCPPS,
23257 IX86_BUILTIN_RCPSS,
23258 IX86_BUILTIN_RSQRTPS,
23259 IX86_BUILTIN_RSQRTPS_NR,
23260 IX86_BUILTIN_RSQRTSS,
23261 IX86_BUILTIN_RSQRTF,
23262 IX86_BUILTIN_SQRTPS,
23263 IX86_BUILTIN_SQRTPS_NR,
23264 IX86_BUILTIN_SQRTSS,
23266 IX86_BUILTIN_UNPCKHPS,
23267 IX86_BUILTIN_UNPCKLPS,
23269 IX86_BUILTIN_ANDPS,
23270 IX86_BUILTIN_ANDNPS,
23272 IX86_BUILTIN_XORPS,
23275 IX86_BUILTIN_LDMXCSR,
23276 IX86_BUILTIN_STMXCSR,
23277 IX86_BUILTIN_SFENCE,
23279 /* 3DNow! Original */
23280 IX86_BUILTIN_FEMMS,
23281 IX86_BUILTIN_PAVGUSB,
23282 IX86_BUILTIN_PF2ID,
23283 IX86_BUILTIN_PFACC,
23284 IX86_BUILTIN_PFADD,
23285 IX86_BUILTIN_PFCMPEQ,
23286 IX86_BUILTIN_PFCMPGE,
23287 IX86_BUILTIN_PFCMPGT,
23288 IX86_BUILTIN_PFMAX,
23289 IX86_BUILTIN_PFMIN,
23290 IX86_BUILTIN_PFMUL,
23291 IX86_BUILTIN_PFRCP,
23292 IX86_BUILTIN_PFRCPIT1,
23293 IX86_BUILTIN_PFRCPIT2,
23294 IX86_BUILTIN_PFRSQIT1,
23295 IX86_BUILTIN_PFRSQRT,
23296 IX86_BUILTIN_PFSUB,
23297 IX86_BUILTIN_PFSUBR,
23298 IX86_BUILTIN_PI2FD,
23299 IX86_BUILTIN_PMULHRW,
23301 /* 3DNow! Athlon Extensions */
23302 IX86_BUILTIN_PF2IW,
23303 IX86_BUILTIN_PFNACC,
23304 IX86_BUILTIN_PFPNACC,
23305 IX86_BUILTIN_PI2FW,
23306 IX86_BUILTIN_PSWAPDSI,
23307 IX86_BUILTIN_PSWAPDSF,
23310 IX86_BUILTIN_ADDPD,
23311 IX86_BUILTIN_ADDSD,
23312 IX86_BUILTIN_DIVPD,
23313 IX86_BUILTIN_DIVSD,
23314 IX86_BUILTIN_MULPD,
23315 IX86_BUILTIN_MULSD,
23316 IX86_BUILTIN_SUBPD,
23317 IX86_BUILTIN_SUBSD,
23319 IX86_BUILTIN_CMPEQPD,
23320 IX86_BUILTIN_CMPLTPD,
23321 IX86_BUILTIN_CMPLEPD,
23322 IX86_BUILTIN_CMPGTPD,
23323 IX86_BUILTIN_CMPGEPD,
23324 IX86_BUILTIN_CMPNEQPD,
23325 IX86_BUILTIN_CMPNLTPD,
23326 IX86_BUILTIN_CMPNLEPD,
23327 IX86_BUILTIN_CMPNGTPD,
23328 IX86_BUILTIN_CMPNGEPD,
23329 IX86_BUILTIN_CMPORDPD,
23330 IX86_BUILTIN_CMPUNORDPD,
23331 IX86_BUILTIN_CMPEQSD,
23332 IX86_BUILTIN_CMPLTSD,
23333 IX86_BUILTIN_CMPLESD,
23334 IX86_BUILTIN_CMPNEQSD,
23335 IX86_BUILTIN_CMPNLTSD,
23336 IX86_BUILTIN_CMPNLESD,
23337 IX86_BUILTIN_CMPORDSD,
23338 IX86_BUILTIN_CMPUNORDSD,
23340 IX86_BUILTIN_COMIEQSD,
23341 IX86_BUILTIN_COMILTSD,
23342 IX86_BUILTIN_COMILESD,
23343 IX86_BUILTIN_COMIGTSD,
23344 IX86_BUILTIN_COMIGESD,
23345 IX86_BUILTIN_COMINEQSD,
23346 IX86_BUILTIN_UCOMIEQSD,
23347 IX86_BUILTIN_UCOMILTSD,
23348 IX86_BUILTIN_UCOMILESD,
23349 IX86_BUILTIN_UCOMIGTSD,
23350 IX86_BUILTIN_UCOMIGESD,
23351 IX86_BUILTIN_UCOMINEQSD,
23353 IX86_BUILTIN_MAXPD,
23354 IX86_BUILTIN_MAXSD,
23355 IX86_BUILTIN_MINPD,
23356 IX86_BUILTIN_MINSD,
23358 IX86_BUILTIN_ANDPD,
23359 IX86_BUILTIN_ANDNPD,
23361 IX86_BUILTIN_XORPD,
23363 IX86_BUILTIN_SQRTPD,
23364 IX86_BUILTIN_SQRTSD,
23366 IX86_BUILTIN_UNPCKHPD,
23367 IX86_BUILTIN_UNPCKLPD,
23369 IX86_BUILTIN_SHUFPD,
23371 IX86_BUILTIN_LOADUPD,
23372 IX86_BUILTIN_STOREUPD,
23373 IX86_BUILTIN_MOVSD,
23375 IX86_BUILTIN_LOADHPD,
23376 IX86_BUILTIN_LOADLPD,
23378 IX86_BUILTIN_CVTDQ2PD,
23379 IX86_BUILTIN_CVTDQ2PS,
23381 IX86_BUILTIN_CVTPD2DQ,
23382 IX86_BUILTIN_CVTPD2PI,
23383 IX86_BUILTIN_CVTPD2PS,
23384 IX86_BUILTIN_CVTTPD2DQ,
23385 IX86_BUILTIN_CVTTPD2PI,
23387 IX86_BUILTIN_CVTPI2PD,
23388 IX86_BUILTIN_CVTSI2SD,
23389 IX86_BUILTIN_CVTSI642SD,
23391 IX86_BUILTIN_CVTSD2SI,
23392 IX86_BUILTIN_CVTSD2SI64,
23393 IX86_BUILTIN_CVTSD2SS,
23394 IX86_BUILTIN_CVTSS2SD,
23395 IX86_BUILTIN_CVTTSD2SI,
23396 IX86_BUILTIN_CVTTSD2SI64,
23398 IX86_BUILTIN_CVTPS2DQ,
23399 IX86_BUILTIN_CVTPS2PD,
23400 IX86_BUILTIN_CVTTPS2DQ,
23402 IX86_BUILTIN_MOVNTI,
23403 IX86_BUILTIN_MOVNTPD,
23404 IX86_BUILTIN_MOVNTDQ,
23406 IX86_BUILTIN_MOVQ128,
23409 IX86_BUILTIN_MASKMOVDQU,
23410 IX86_BUILTIN_MOVMSKPD,
23411 IX86_BUILTIN_PMOVMSKB128,
23413 IX86_BUILTIN_PACKSSWB128,
23414 IX86_BUILTIN_PACKSSDW128,
23415 IX86_BUILTIN_PACKUSWB128,
23417 IX86_BUILTIN_PADDB128,
23418 IX86_BUILTIN_PADDW128,
23419 IX86_BUILTIN_PADDD128,
23420 IX86_BUILTIN_PADDQ128,
23421 IX86_BUILTIN_PADDSB128,
23422 IX86_BUILTIN_PADDSW128,
23423 IX86_BUILTIN_PADDUSB128,
23424 IX86_BUILTIN_PADDUSW128,
23425 IX86_BUILTIN_PSUBB128,
23426 IX86_BUILTIN_PSUBW128,
23427 IX86_BUILTIN_PSUBD128,
23428 IX86_BUILTIN_PSUBQ128,
23429 IX86_BUILTIN_PSUBSB128,
23430 IX86_BUILTIN_PSUBSW128,
23431 IX86_BUILTIN_PSUBUSB128,
23432 IX86_BUILTIN_PSUBUSW128,
23434 IX86_BUILTIN_PAND128,
23435 IX86_BUILTIN_PANDN128,
23436 IX86_BUILTIN_POR128,
23437 IX86_BUILTIN_PXOR128,
23439 IX86_BUILTIN_PAVGB128,
23440 IX86_BUILTIN_PAVGW128,
23442 IX86_BUILTIN_PCMPEQB128,
23443 IX86_BUILTIN_PCMPEQW128,
23444 IX86_BUILTIN_PCMPEQD128,
23445 IX86_BUILTIN_PCMPGTB128,
23446 IX86_BUILTIN_PCMPGTW128,
23447 IX86_BUILTIN_PCMPGTD128,
23449 IX86_BUILTIN_PMADDWD128,
23451 IX86_BUILTIN_PMAXSW128,
23452 IX86_BUILTIN_PMAXUB128,
23453 IX86_BUILTIN_PMINSW128,
23454 IX86_BUILTIN_PMINUB128,
23456 IX86_BUILTIN_PMULUDQ,
23457 IX86_BUILTIN_PMULUDQ128,
23458 IX86_BUILTIN_PMULHUW128,
23459 IX86_BUILTIN_PMULHW128,
23460 IX86_BUILTIN_PMULLW128,
23462 IX86_BUILTIN_PSADBW128,
23463 IX86_BUILTIN_PSHUFHW,
23464 IX86_BUILTIN_PSHUFLW,
23465 IX86_BUILTIN_PSHUFD,
23467 IX86_BUILTIN_PSLLDQI128,
23468 IX86_BUILTIN_PSLLWI128,
23469 IX86_BUILTIN_PSLLDI128,
23470 IX86_BUILTIN_PSLLQI128,
23471 IX86_BUILTIN_PSRAWI128,
23472 IX86_BUILTIN_PSRADI128,
23473 IX86_BUILTIN_PSRLDQI128,
23474 IX86_BUILTIN_PSRLWI128,
23475 IX86_BUILTIN_PSRLDI128,
23476 IX86_BUILTIN_PSRLQI128,
23478 IX86_BUILTIN_PSLLDQ128,
23479 IX86_BUILTIN_PSLLW128,
23480 IX86_BUILTIN_PSLLD128,
23481 IX86_BUILTIN_PSLLQ128,
23482 IX86_BUILTIN_PSRAW128,
23483 IX86_BUILTIN_PSRAD128,
23484 IX86_BUILTIN_PSRLW128,
23485 IX86_BUILTIN_PSRLD128,
23486 IX86_BUILTIN_PSRLQ128,
23488 IX86_BUILTIN_PUNPCKHBW128,
23489 IX86_BUILTIN_PUNPCKHWD128,
23490 IX86_BUILTIN_PUNPCKHDQ128,
23491 IX86_BUILTIN_PUNPCKHQDQ128,
23492 IX86_BUILTIN_PUNPCKLBW128,
23493 IX86_BUILTIN_PUNPCKLWD128,
23494 IX86_BUILTIN_PUNPCKLDQ128,
23495 IX86_BUILTIN_PUNPCKLQDQ128,
23497 IX86_BUILTIN_CLFLUSH,
23498 IX86_BUILTIN_MFENCE,
23499 IX86_BUILTIN_LFENCE,
23500 IX86_BUILTIN_PAUSE,
23502 IX86_BUILTIN_BSRSI,
23503 IX86_BUILTIN_BSRDI,
23504 IX86_BUILTIN_RDPMC,
23505 IX86_BUILTIN_RDTSC,
23506 IX86_BUILTIN_RDTSCP,
23507 IX86_BUILTIN_ROLQI,
23508 IX86_BUILTIN_ROLHI,
23509 IX86_BUILTIN_RORQI,
23510 IX86_BUILTIN_RORHI,
23513 IX86_BUILTIN_ADDSUBPS,
23514 IX86_BUILTIN_HADDPS,
23515 IX86_BUILTIN_HSUBPS,
23516 IX86_BUILTIN_MOVSHDUP,
23517 IX86_BUILTIN_MOVSLDUP,
23518 IX86_BUILTIN_ADDSUBPD,
23519 IX86_BUILTIN_HADDPD,
23520 IX86_BUILTIN_HSUBPD,
23521 IX86_BUILTIN_LDDQU,
23523 IX86_BUILTIN_MONITOR,
23524 IX86_BUILTIN_MWAIT,
23527 IX86_BUILTIN_PHADDW,
23528 IX86_BUILTIN_PHADDD,
23529 IX86_BUILTIN_PHADDSW,
23530 IX86_BUILTIN_PHSUBW,
23531 IX86_BUILTIN_PHSUBD,
23532 IX86_BUILTIN_PHSUBSW,
23533 IX86_BUILTIN_PMADDUBSW,
23534 IX86_BUILTIN_PMULHRSW,
23535 IX86_BUILTIN_PSHUFB,
23536 IX86_BUILTIN_PSIGNB,
23537 IX86_BUILTIN_PSIGNW,
23538 IX86_BUILTIN_PSIGND,
23539 IX86_BUILTIN_PALIGNR,
23540 IX86_BUILTIN_PABSB,
23541 IX86_BUILTIN_PABSW,
23542 IX86_BUILTIN_PABSD,
23544 IX86_BUILTIN_PHADDW128,
23545 IX86_BUILTIN_PHADDD128,
23546 IX86_BUILTIN_PHADDSW128,
23547 IX86_BUILTIN_PHSUBW128,
23548 IX86_BUILTIN_PHSUBD128,
23549 IX86_BUILTIN_PHSUBSW128,
23550 IX86_BUILTIN_PMADDUBSW128,
23551 IX86_BUILTIN_PMULHRSW128,
23552 IX86_BUILTIN_PSHUFB128,
23553 IX86_BUILTIN_PSIGNB128,
23554 IX86_BUILTIN_PSIGNW128,
23555 IX86_BUILTIN_PSIGND128,
23556 IX86_BUILTIN_PALIGNR128,
23557 IX86_BUILTIN_PABSB128,
23558 IX86_BUILTIN_PABSW128,
23559 IX86_BUILTIN_PABSD128,
23561 /* AMDFAM10 - SSE4A New Instructions. */
23562 IX86_BUILTIN_MOVNTSD,
23563 IX86_BUILTIN_MOVNTSS,
23564 IX86_BUILTIN_EXTRQI,
23565 IX86_BUILTIN_EXTRQ,
23566 IX86_BUILTIN_INSERTQI,
23567 IX86_BUILTIN_INSERTQ,
23570 IX86_BUILTIN_BLENDPD,
23571 IX86_BUILTIN_BLENDPS,
23572 IX86_BUILTIN_BLENDVPD,
23573 IX86_BUILTIN_BLENDVPS,
23574 IX86_BUILTIN_PBLENDVB128,
23575 IX86_BUILTIN_PBLENDW128,
23580 IX86_BUILTIN_INSERTPS128,
23582 IX86_BUILTIN_MOVNTDQA,
23583 IX86_BUILTIN_MPSADBW128,
23584 IX86_BUILTIN_PACKUSDW128,
23585 IX86_BUILTIN_PCMPEQQ,
23586 IX86_BUILTIN_PHMINPOSUW128,
23588 IX86_BUILTIN_PMAXSB128,
23589 IX86_BUILTIN_PMAXSD128,
23590 IX86_BUILTIN_PMAXUD128,
23591 IX86_BUILTIN_PMAXUW128,
23593 IX86_BUILTIN_PMINSB128,
23594 IX86_BUILTIN_PMINSD128,
23595 IX86_BUILTIN_PMINUD128,
23596 IX86_BUILTIN_PMINUW128,
23598 IX86_BUILTIN_PMOVSXBW128,
23599 IX86_BUILTIN_PMOVSXBD128,
23600 IX86_BUILTIN_PMOVSXBQ128,
23601 IX86_BUILTIN_PMOVSXWD128,
23602 IX86_BUILTIN_PMOVSXWQ128,
23603 IX86_BUILTIN_PMOVSXDQ128,
23605 IX86_BUILTIN_PMOVZXBW128,
23606 IX86_BUILTIN_PMOVZXBD128,
23607 IX86_BUILTIN_PMOVZXBQ128,
23608 IX86_BUILTIN_PMOVZXWD128,
23609 IX86_BUILTIN_PMOVZXWQ128,
23610 IX86_BUILTIN_PMOVZXDQ128,
23612 IX86_BUILTIN_PMULDQ128,
23613 IX86_BUILTIN_PMULLD128,
23615 IX86_BUILTIN_ROUNDPD,
23616 IX86_BUILTIN_ROUNDPS,
23617 IX86_BUILTIN_ROUNDSD,
23618 IX86_BUILTIN_ROUNDSS,
23620 IX86_BUILTIN_FLOORPD,
23621 IX86_BUILTIN_CEILPD,
23622 IX86_BUILTIN_TRUNCPD,
23623 IX86_BUILTIN_RINTPD,
23624 IX86_BUILTIN_FLOORPS,
23625 IX86_BUILTIN_CEILPS,
23626 IX86_BUILTIN_TRUNCPS,
23627 IX86_BUILTIN_RINTPS,
23629 IX86_BUILTIN_PTESTZ,
23630 IX86_BUILTIN_PTESTC,
23631 IX86_BUILTIN_PTESTNZC,
23633 IX86_BUILTIN_VEC_INIT_V2SI,
23634 IX86_BUILTIN_VEC_INIT_V4HI,
23635 IX86_BUILTIN_VEC_INIT_V8QI,
23636 IX86_BUILTIN_VEC_EXT_V2DF,
23637 IX86_BUILTIN_VEC_EXT_V2DI,
23638 IX86_BUILTIN_VEC_EXT_V4SF,
23639 IX86_BUILTIN_VEC_EXT_V4SI,
23640 IX86_BUILTIN_VEC_EXT_V8HI,
23641 IX86_BUILTIN_VEC_EXT_V2SI,
23642 IX86_BUILTIN_VEC_EXT_V4HI,
23643 IX86_BUILTIN_VEC_EXT_V16QI,
23644 IX86_BUILTIN_VEC_SET_V2DI,
23645 IX86_BUILTIN_VEC_SET_V4SF,
23646 IX86_BUILTIN_VEC_SET_V4SI,
23647 IX86_BUILTIN_VEC_SET_V8HI,
23648 IX86_BUILTIN_VEC_SET_V4HI,
23649 IX86_BUILTIN_VEC_SET_V16QI,
23651 IX86_BUILTIN_VEC_PACK_SFIX,
23654 IX86_BUILTIN_CRC32QI,
23655 IX86_BUILTIN_CRC32HI,
23656 IX86_BUILTIN_CRC32SI,
23657 IX86_BUILTIN_CRC32DI,
23659 IX86_BUILTIN_PCMPESTRI128,
23660 IX86_BUILTIN_PCMPESTRM128,
23661 IX86_BUILTIN_PCMPESTRA128,
23662 IX86_BUILTIN_PCMPESTRC128,
23663 IX86_BUILTIN_PCMPESTRO128,
23664 IX86_BUILTIN_PCMPESTRS128,
23665 IX86_BUILTIN_PCMPESTRZ128,
23666 IX86_BUILTIN_PCMPISTRI128,
23667 IX86_BUILTIN_PCMPISTRM128,
23668 IX86_BUILTIN_PCMPISTRA128,
23669 IX86_BUILTIN_PCMPISTRC128,
23670 IX86_BUILTIN_PCMPISTRO128,
23671 IX86_BUILTIN_PCMPISTRS128,
23672 IX86_BUILTIN_PCMPISTRZ128,
23674 IX86_BUILTIN_PCMPGTQ,
23676 /* AES instructions */
23677 IX86_BUILTIN_AESENC128,
23678 IX86_BUILTIN_AESENCLAST128,
23679 IX86_BUILTIN_AESDEC128,
23680 IX86_BUILTIN_AESDECLAST128,
23681 IX86_BUILTIN_AESIMC128,
23682 IX86_BUILTIN_AESKEYGENASSIST128,
23684 /* PCLMUL instruction */
23685 IX86_BUILTIN_PCLMULQDQ128,
23688 IX86_BUILTIN_ADDPD256,
23689 IX86_BUILTIN_ADDPS256,
23690 IX86_BUILTIN_ADDSUBPD256,
23691 IX86_BUILTIN_ADDSUBPS256,
23692 IX86_BUILTIN_ANDPD256,
23693 IX86_BUILTIN_ANDPS256,
23694 IX86_BUILTIN_ANDNPD256,
23695 IX86_BUILTIN_ANDNPS256,
23696 IX86_BUILTIN_BLENDPD256,
23697 IX86_BUILTIN_BLENDPS256,
23698 IX86_BUILTIN_BLENDVPD256,
23699 IX86_BUILTIN_BLENDVPS256,
23700 IX86_BUILTIN_DIVPD256,
23701 IX86_BUILTIN_DIVPS256,
23702 IX86_BUILTIN_DPPS256,
23703 IX86_BUILTIN_HADDPD256,
23704 IX86_BUILTIN_HADDPS256,
23705 IX86_BUILTIN_HSUBPD256,
23706 IX86_BUILTIN_HSUBPS256,
23707 IX86_BUILTIN_MAXPD256,
23708 IX86_BUILTIN_MAXPS256,
23709 IX86_BUILTIN_MINPD256,
23710 IX86_BUILTIN_MINPS256,
23711 IX86_BUILTIN_MULPD256,
23712 IX86_BUILTIN_MULPS256,
23713 IX86_BUILTIN_ORPD256,
23714 IX86_BUILTIN_ORPS256,
23715 IX86_BUILTIN_SHUFPD256,
23716 IX86_BUILTIN_SHUFPS256,
23717 IX86_BUILTIN_SUBPD256,
23718 IX86_BUILTIN_SUBPS256,
23719 IX86_BUILTIN_XORPD256,
23720 IX86_BUILTIN_XORPS256,
23721 IX86_BUILTIN_CMPSD,
23722 IX86_BUILTIN_CMPSS,
23723 IX86_BUILTIN_CMPPD,
23724 IX86_BUILTIN_CMPPS,
23725 IX86_BUILTIN_CMPPD256,
23726 IX86_BUILTIN_CMPPS256,
23727 IX86_BUILTIN_CVTDQ2PD256,
23728 IX86_BUILTIN_CVTDQ2PS256,
23729 IX86_BUILTIN_CVTPD2PS256,
23730 IX86_BUILTIN_CVTPS2DQ256,
23731 IX86_BUILTIN_CVTPS2PD256,
23732 IX86_BUILTIN_CVTTPD2DQ256,
23733 IX86_BUILTIN_CVTPD2DQ256,
23734 IX86_BUILTIN_CVTTPS2DQ256,
23735 IX86_BUILTIN_EXTRACTF128PD256,
23736 IX86_BUILTIN_EXTRACTF128PS256,
23737 IX86_BUILTIN_EXTRACTF128SI256,
23738 IX86_BUILTIN_VZEROALL,
23739 IX86_BUILTIN_VZEROUPPER,
23740 IX86_BUILTIN_VPERMILVARPD,
23741 IX86_BUILTIN_VPERMILVARPS,
23742 IX86_BUILTIN_VPERMILVARPD256,
23743 IX86_BUILTIN_VPERMILVARPS256,
23744 IX86_BUILTIN_VPERMILPD,
23745 IX86_BUILTIN_VPERMILPS,
23746 IX86_BUILTIN_VPERMILPD256,
23747 IX86_BUILTIN_VPERMILPS256,
23748 IX86_BUILTIN_VPERMIL2PD,
23749 IX86_BUILTIN_VPERMIL2PS,
23750 IX86_BUILTIN_VPERMIL2PD256,
23751 IX86_BUILTIN_VPERMIL2PS256,
23752 IX86_BUILTIN_VPERM2F128PD256,
23753 IX86_BUILTIN_VPERM2F128PS256,
23754 IX86_BUILTIN_VPERM2F128SI256,
23755 IX86_BUILTIN_VBROADCASTSS,
23756 IX86_BUILTIN_VBROADCASTSD256,
23757 IX86_BUILTIN_VBROADCASTSS256,
23758 IX86_BUILTIN_VBROADCASTPD256,
23759 IX86_BUILTIN_VBROADCASTPS256,
23760 IX86_BUILTIN_VINSERTF128PD256,
23761 IX86_BUILTIN_VINSERTF128PS256,
23762 IX86_BUILTIN_VINSERTF128SI256,
23763 IX86_BUILTIN_LOADUPD256,
23764 IX86_BUILTIN_LOADUPS256,
23765 IX86_BUILTIN_STOREUPD256,
23766 IX86_BUILTIN_STOREUPS256,
23767 IX86_BUILTIN_LDDQU256,
23768 IX86_BUILTIN_MOVNTDQ256,
23769 IX86_BUILTIN_MOVNTPD256,
23770 IX86_BUILTIN_MOVNTPS256,
23771 IX86_BUILTIN_LOADDQU256,
23772 IX86_BUILTIN_STOREDQU256,
23773 IX86_BUILTIN_MASKLOADPD,
23774 IX86_BUILTIN_MASKLOADPS,
23775 IX86_BUILTIN_MASKSTOREPD,
23776 IX86_BUILTIN_MASKSTOREPS,
23777 IX86_BUILTIN_MASKLOADPD256,
23778 IX86_BUILTIN_MASKLOADPS256,
23779 IX86_BUILTIN_MASKSTOREPD256,
23780 IX86_BUILTIN_MASKSTOREPS256,
23781 IX86_BUILTIN_MOVSHDUP256,
23782 IX86_BUILTIN_MOVSLDUP256,
23783 IX86_BUILTIN_MOVDDUP256,
23785 IX86_BUILTIN_SQRTPD256,
23786 IX86_BUILTIN_SQRTPS256,
23787 IX86_BUILTIN_SQRTPS_NR256,
23788 IX86_BUILTIN_RSQRTPS256,
23789 IX86_BUILTIN_RSQRTPS_NR256,
23791 IX86_BUILTIN_RCPPS256,
23793 IX86_BUILTIN_ROUNDPD256,
23794 IX86_BUILTIN_ROUNDPS256,
23796 IX86_BUILTIN_FLOORPD256,
23797 IX86_BUILTIN_CEILPD256,
23798 IX86_BUILTIN_TRUNCPD256,
23799 IX86_BUILTIN_RINTPD256,
23800 IX86_BUILTIN_FLOORPS256,
23801 IX86_BUILTIN_CEILPS256,
23802 IX86_BUILTIN_TRUNCPS256,
23803 IX86_BUILTIN_RINTPS256,
23805 IX86_BUILTIN_UNPCKHPD256,
23806 IX86_BUILTIN_UNPCKLPD256,
23807 IX86_BUILTIN_UNPCKHPS256,
23808 IX86_BUILTIN_UNPCKLPS256,
23810 IX86_BUILTIN_SI256_SI,
23811 IX86_BUILTIN_PS256_PS,
23812 IX86_BUILTIN_PD256_PD,
23813 IX86_BUILTIN_SI_SI256,
23814 IX86_BUILTIN_PS_PS256,
23815 IX86_BUILTIN_PD_PD256,
23817 IX86_BUILTIN_VTESTZPD,
23818 IX86_BUILTIN_VTESTCPD,
23819 IX86_BUILTIN_VTESTNZCPD,
23820 IX86_BUILTIN_VTESTZPS,
23821 IX86_BUILTIN_VTESTCPS,
23822 IX86_BUILTIN_VTESTNZCPS,
23823 IX86_BUILTIN_VTESTZPD256,
23824 IX86_BUILTIN_VTESTCPD256,
23825 IX86_BUILTIN_VTESTNZCPD256,
23826 IX86_BUILTIN_VTESTZPS256,
23827 IX86_BUILTIN_VTESTCPS256,
23828 IX86_BUILTIN_VTESTNZCPS256,
23829 IX86_BUILTIN_PTESTZ256,
23830 IX86_BUILTIN_PTESTC256,
23831 IX86_BUILTIN_PTESTNZC256,
23833 IX86_BUILTIN_MOVMSKPD256,
23834 IX86_BUILTIN_MOVMSKPS256,
23836 /* TFmode support builtins. */
23838 IX86_BUILTIN_HUGE_VALQ,
23839 IX86_BUILTIN_FABSQ,
23840 IX86_BUILTIN_COPYSIGNQ,
23842 /* Vectorizer support builtins. */
23843 IX86_BUILTIN_CPYSGNPS,
23844 IX86_BUILTIN_CPYSGNPD,
23845 IX86_BUILTIN_CPYSGNPS256,
23846 IX86_BUILTIN_CPYSGNPD256,
23848 IX86_BUILTIN_CVTUDQ2PS,
23850 IX86_BUILTIN_VEC_PERM_V2DF,
23851 IX86_BUILTIN_VEC_PERM_V4SF,
23852 IX86_BUILTIN_VEC_PERM_V2DI,
23853 IX86_BUILTIN_VEC_PERM_V4SI,
23854 IX86_BUILTIN_VEC_PERM_V8HI,
23855 IX86_BUILTIN_VEC_PERM_V16QI,
23856 IX86_BUILTIN_VEC_PERM_V2DI_U,
23857 IX86_BUILTIN_VEC_PERM_V4SI_U,
23858 IX86_BUILTIN_VEC_PERM_V8HI_U,
23859 IX86_BUILTIN_VEC_PERM_V16QI_U,
23860 IX86_BUILTIN_VEC_PERM_V4DF,
23861 IX86_BUILTIN_VEC_PERM_V8SF,
23863 /* FMA4 and XOP instructions. */
23864 IX86_BUILTIN_VFMADDSS,
23865 IX86_BUILTIN_VFMADDSD,
23866 IX86_BUILTIN_VFMADDPS,
23867 IX86_BUILTIN_VFMADDPD,
23868 IX86_BUILTIN_VFMADDPS256,
23869 IX86_BUILTIN_VFMADDPD256,
23870 IX86_BUILTIN_VFMADDSUBPS,
23871 IX86_BUILTIN_VFMADDSUBPD,
23872 IX86_BUILTIN_VFMADDSUBPS256,
23873 IX86_BUILTIN_VFMADDSUBPD256,
23875 IX86_BUILTIN_VPCMOV,
23876 IX86_BUILTIN_VPCMOV_V2DI,
23877 IX86_BUILTIN_VPCMOV_V4SI,
23878 IX86_BUILTIN_VPCMOV_V8HI,
23879 IX86_BUILTIN_VPCMOV_V16QI,
23880 IX86_BUILTIN_VPCMOV_V4SF,
23881 IX86_BUILTIN_VPCMOV_V2DF,
23882 IX86_BUILTIN_VPCMOV256,
23883 IX86_BUILTIN_VPCMOV_V4DI256,
23884 IX86_BUILTIN_VPCMOV_V8SI256,
23885 IX86_BUILTIN_VPCMOV_V16HI256,
23886 IX86_BUILTIN_VPCMOV_V32QI256,
23887 IX86_BUILTIN_VPCMOV_V8SF256,
23888 IX86_BUILTIN_VPCMOV_V4DF256,
23890 IX86_BUILTIN_VPPERM,
23892 IX86_BUILTIN_VPMACSSWW,
23893 IX86_BUILTIN_VPMACSWW,
23894 IX86_BUILTIN_VPMACSSWD,
23895 IX86_BUILTIN_VPMACSWD,
23896 IX86_BUILTIN_VPMACSSDD,
23897 IX86_BUILTIN_VPMACSDD,
23898 IX86_BUILTIN_VPMACSSDQL,
23899 IX86_BUILTIN_VPMACSSDQH,
23900 IX86_BUILTIN_VPMACSDQL,
23901 IX86_BUILTIN_VPMACSDQH,
23902 IX86_BUILTIN_VPMADCSSWD,
23903 IX86_BUILTIN_VPMADCSWD,
23905 IX86_BUILTIN_VPHADDBW,
23906 IX86_BUILTIN_VPHADDBD,
23907 IX86_BUILTIN_VPHADDBQ,
23908 IX86_BUILTIN_VPHADDWD,
23909 IX86_BUILTIN_VPHADDWQ,
23910 IX86_BUILTIN_VPHADDDQ,
23911 IX86_BUILTIN_VPHADDUBW,
23912 IX86_BUILTIN_VPHADDUBD,
23913 IX86_BUILTIN_VPHADDUBQ,
23914 IX86_BUILTIN_VPHADDUWD,
23915 IX86_BUILTIN_VPHADDUWQ,
23916 IX86_BUILTIN_VPHADDUDQ,
23917 IX86_BUILTIN_VPHSUBBW,
23918 IX86_BUILTIN_VPHSUBWD,
23919 IX86_BUILTIN_VPHSUBDQ,
23921 IX86_BUILTIN_VPROTB,
23922 IX86_BUILTIN_VPROTW,
23923 IX86_BUILTIN_VPROTD,
23924 IX86_BUILTIN_VPROTQ,
23925 IX86_BUILTIN_VPROTB_IMM,
23926 IX86_BUILTIN_VPROTW_IMM,
23927 IX86_BUILTIN_VPROTD_IMM,
23928 IX86_BUILTIN_VPROTQ_IMM,
23930 IX86_BUILTIN_VPSHLB,
23931 IX86_BUILTIN_VPSHLW,
23932 IX86_BUILTIN_VPSHLD,
23933 IX86_BUILTIN_VPSHLQ,
23934 IX86_BUILTIN_VPSHAB,
23935 IX86_BUILTIN_VPSHAW,
23936 IX86_BUILTIN_VPSHAD,
23937 IX86_BUILTIN_VPSHAQ,
23939 IX86_BUILTIN_VFRCZSS,
23940 IX86_BUILTIN_VFRCZSD,
23941 IX86_BUILTIN_VFRCZPS,
23942 IX86_BUILTIN_VFRCZPD,
23943 IX86_BUILTIN_VFRCZPS256,
23944 IX86_BUILTIN_VFRCZPD256,
23946 IX86_BUILTIN_VPCOMEQUB,
23947 IX86_BUILTIN_VPCOMNEUB,
23948 IX86_BUILTIN_VPCOMLTUB,
23949 IX86_BUILTIN_VPCOMLEUB,
23950 IX86_BUILTIN_VPCOMGTUB,
23951 IX86_BUILTIN_VPCOMGEUB,
23952 IX86_BUILTIN_VPCOMFALSEUB,
23953 IX86_BUILTIN_VPCOMTRUEUB,
23955 IX86_BUILTIN_VPCOMEQUW,
23956 IX86_BUILTIN_VPCOMNEUW,
23957 IX86_BUILTIN_VPCOMLTUW,
23958 IX86_BUILTIN_VPCOMLEUW,
23959 IX86_BUILTIN_VPCOMGTUW,
23960 IX86_BUILTIN_VPCOMGEUW,
23961 IX86_BUILTIN_VPCOMFALSEUW,
23962 IX86_BUILTIN_VPCOMTRUEUW,
23964 IX86_BUILTIN_VPCOMEQUD,
23965 IX86_BUILTIN_VPCOMNEUD,
23966 IX86_BUILTIN_VPCOMLTUD,
23967 IX86_BUILTIN_VPCOMLEUD,
23968 IX86_BUILTIN_VPCOMGTUD,
23969 IX86_BUILTIN_VPCOMGEUD,
23970 IX86_BUILTIN_VPCOMFALSEUD,
23971 IX86_BUILTIN_VPCOMTRUEUD,
23973 IX86_BUILTIN_VPCOMEQUQ,
23974 IX86_BUILTIN_VPCOMNEUQ,
23975 IX86_BUILTIN_VPCOMLTUQ,
23976 IX86_BUILTIN_VPCOMLEUQ,
23977 IX86_BUILTIN_VPCOMGTUQ,
23978 IX86_BUILTIN_VPCOMGEUQ,
23979 IX86_BUILTIN_VPCOMFALSEUQ,
23980 IX86_BUILTIN_VPCOMTRUEUQ,
23982 IX86_BUILTIN_VPCOMEQB,
23983 IX86_BUILTIN_VPCOMNEB,
23984 IX86_BUILTIN_VPCOMLTB,
23985 IX86_BUILTIN_VPCOMLEB,
23986 IX86_BUILTIN_VPCOMGTB,
23987 IX86_BUILTIN_VPCOMGEB,
23988 IX86_BUILTIN_VPCOMFALSEB,
23989 IX86_BUILTIN_VPCOMTRUEB,
23991 IX86_BUILTIN_VPCOMEQW,
23992 IX86_BUILTIN_VPCOMNEW,
23993 IX86_BUILTIN_VPCOMLTW,
23994 IX86_BUILTIN_VPCOMLEW,
23995 IX86_BUILTIN_VPCOMGTW,
23996 IX86_BUILTIN_VPCOMGEW,
23997 IX86_BUILTIN_VPCOMFALSEW,
23998 IX86_BUILTIN_VPCOMTRUEW,
24000 IX86_BUILTIN_VPCOMEQD,
24001 IX86_BUILTIN_VPCOMNED,
24002 IX86_BUILTIN_VPCOMLTD,
24003 IX86_BUILTIN_VPCOMLED,
24004 IX86_BUILTIN_VPCOMGTD,
24005 IX86_BUILTIN_VPCOMGED,
24006 IX86_BUILTIN_VPCOMFALSED,
24007 IX86_BUILTIN_VPCOMTRUED,
24009 IX86_BUILTIN_VPCOMEQQ,
24010 IX86_BUILTIN_VPCOMNEQ,
24011 IX86_BUILTIN_VPCOMLTQ,
24012 IX86_BUILTIN_VPCOMLEQ,
24013 IX86_BUILTIN_VPCOMGTQ,
24014 IX86_BUILTIN_VPCOMGEQ,
24015 IX86_BUILTIN_VPCOMFALSEQ,
24016 IX86_BUILTIN_VPCOMTRUEQ,
24018 /* LWP instructions. */
24019 IX86_BUILTIN_LLWPCB,
24020 IX86_BUILTIN_SLWPCB,
24021 IX86_BUILTIN_LWPVAL32,
24022 IX86_BUILTIN_LWPVAL64,
24023 IX86_BUILTIN_LWPINS32,
24024 IX86_BUILTIN_LWPINS64,
24028 /* BMI instructions. */
24029 IX86_BUILTIN_BEXTR32,
24030 IX86_BUILTIN_BEXTR64,
24033 /* TBM instructions. */
24034 IX86_BUILTIN_BEXTRI32,
24035 IX86_BUILTIN_BEXTRI64,
24038 /* FSGSBASE instructions. */
24039 IX86_BUILTIN_RDFSBASE32,
24040 IX86_BUILTIN_RDFSBASE64,
24041 IX86_BUILTIN_RDGSBASE32,
24042 IX86_BUILTIN_RDGSBASE64,
24043 IX86_BUILTIN_WRFSBASE32,
24044 IX86_BUILTIN_WRFSBASE64,
24045 IX86_BUILTIN_WRGSBASE32,
24046 IX86_BUILTIN_WRGSBASE64,
24048 /* RDRND instructions. */
24049 IX86_BUILTIN_RDRAND16_STEP,
24050 IX86_BUILTIN_RDRAND32_STEP,
24051 IX86_BUILTIN_RDRAND64_STEP,
24053 /* F16C instructions. */
24054 IX86_BUILTIN_CVTPH2PS,
24055 IX86_BUILTIN_CVTPH2PS256,
24056 IX86_BUILTIN_CVTPS2PH,
24057 IX86_BUILTIN_CVTPS2PH256,
24059   /* CFString built-in for Darwin.  */
24060 IX86_BUILTIN_CFSTRING,
24065 /* Table for the ix86 builtin decls. */
24066 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
24068 /* Table of all of the builtin functions that are possible with different
24069    ISAs but are waiting to be built until a function is declared to use
24070    that ISA.  */
24071 struct builtin_isa {
24072 const char *name; /* function name */
24073 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
24074 int isa; /* isa_flags this builtin is defined for */
24075 bool const_p; /* true if the declaration is constant */
24076 bool set_and_not_built_p;
24079 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
24082 /* Add an ix86 target builtin function with CODE, NAME and TYPE.  Save the
24083    MASK of which isa_flags to use in the ix86_builtins_isa array.  Store the
24084    function decl in the ix86_builtins array.  Return the function decl, or
24085    NULL_TREE if the builtin was not added.
24087 If the front end has a special hook for builtin functions, delay adding
24088 builtin functions that aren't in the current ISA until the ISA is changed
24089    with function specific optimization.  Doing so can save about 300K for the
24090    default compiler.  When the builtin is expanded, check at that time whether
24091    it is valid.

24093    If the front end doesn't have a special hook, record all builtins, even if
24094    they aren't in the current ISA, in case the user uses function specific
24095    options for a different ISA, so that we don't get scope
24096 errors if a builtin is added in the middle of a function scope. */
24099 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
24100 enum ix86_builtins code)
24102 tree decl = NULL_TREE;
24104 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
24106 ix86_builtins_isa[(int) code].isa = mask;
24108 mask &= ~OPTION_MASK_ISA_64BIT;
24110 || (mask & ix86_isa_flags) != 0
24111 || (lang_hooks.builtin_function
24112 == lang_hooks.builtin_function_ext_scope))
24115 tree type = ix86_get_builtin_func_type (tcode);
24116 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
24118 ix86_builtins[(int) code] = decl;
24119 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
24123 ix86_builtins[(int) code] = NULL_TREE;
24124 ix86_builtins_isa[(int) code].tcode = tcode;
24125 ix86_builtins_isa[(int) code].name = name;
24126 ix86_builtins_isa[(int) code].const_p = false;
24127 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
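/* Illustrative usage (the actual registration calls appear later in
   this file):
     def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
		  VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
   either creates the builtin immediately when SSE is enabled or records
   it in ix86_builtins_isa for deferred creation.  */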
24134 /* Like def_builtin, but also marks the function decl "const". */
24137 def_builtin_const (int mask, const char *name,
24138 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
24140 tree decl = def_builtin (mask, name, tcode, code);
24142 TREE_READONLY (decl) = 1;
24144 ix86_builtins_isa[(int) code].const_p = true;
24149 /* Add any new builtin functions for a given ISA that may not have been
24150 declared. This saves a bit of space compared to adding all of the
24151 declarations to the tree, even if we didn't use them. */
24154 ix86_add_new_builtins (int isa)
24158 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
24160 if ((ix86_builtins_isa[i].isa & isa) != 0
24161 && ix86_builtins_isa[i].set_and_not_built_p)
24165 /* Don't define the builtin again. */
24166 ix86_builtins_isa[i].set_and_not_built_p = false;
24168 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
24169 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
24170 type, i, BUILT_IN_MD, NULL,
24173 ix86_builtins[i] = decl;
24174 if (ix86_builtins_isa[i].const_p)
24175 TREE_READONLY (decl) = 1;
24180 /* Bits for builtin_description.flag. */
24182 /* Set when we don't support the comparison natively, and should
24183 swap_comparison in order to support it. */
24184 #define BUILTIN_DESC_SWAP_OPERANDS 1
24186 struct builtin_description
24188 const unsigned int mask;
24189 const enum insn_code icode;
24190 const char *const name;
24191 const enum ix86_builtins code;
24192 const enum rtx_code comparison;
24196 static const struct builtin_description bdesc_comi[] =
24198 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
24199 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
24200 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
24201 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
24202 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
24203 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
24204 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
24205 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
24206 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
24207 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
24208 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
24209 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
24210 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
24211 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
24212 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
24213 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
24214 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
24215 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
24216 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
24217 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
24218 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
24219 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
24220 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
24221 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
24222 };
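/* These builtins reach users through the intrinsic headers; e.g. in
   <xmmintrin.h> the comigt wrapper is essentially (sketch, minus the
   gnu_inline/artificial attributes of the installed header):

     extern __inline int
     _mm_comigt_ss (__m128 __A, __m128 __B)
     {
       return __builtin_ia32_comigt ((__v4sf) __A, (__v4sf) __B);
     }
*/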
24224 static const struct builtin_description bdesc_pcmpestr[] =
24225 {
24226 /* SSE4.2 */
24227 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
24228 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
24229 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
24230 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
24231 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
24232 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
24233 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
24234 };
24236 static const struct builtin_description bdesc_pcmpistr[] =
24237 {
24238 /* SSE4.2 */
24239 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
24240 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
24241 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
24242 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
24243 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
24244 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
24245 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
24246 };
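/* The pcmpestr/pcmpistr entries back the string-compare intrinsics in
   <smmintrin.h>; the entries carrying a CC*mode flag feed the
   EFLAGS-reading forms.  A minimal sketch of a use of the index form:

     #include <smmintrin.h>

     int
     first_equal_byte (__m128i a, __m128i b)
     {
       return _mm_cmpistri (a, b, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
     }
*/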
24248 /* Special builtins with variable number of arguments. */
24249 static const struct builtin_description bdesc_special_args[] =
24250 {
24251 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
24252 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
24253 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_pause, "__builtin_ia32_pause", IX86_BUILTIN_PAUSE, UNKNOWN, (int) VOID_FTYPE_VOID },
24255 /* MMX */
24256 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
24258 /* 3DNow! */
24259 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
24261 /* SSE */
24262 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
24263 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
24264 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
24266 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
24267 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
24268 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
24269 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
24271 /* SSE or 3DNow!A */
24272 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
24273 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
24275 /* SSE2 */
24276 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
24277 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
24278 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
24279 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
24280 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
24281 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
24282 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
24283 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
24284 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
24286 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
24287 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
24289 /* SSE3 */
24290 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
24292 /* SSE4.1 */
24293 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
24295 /* SSE4A */
24296 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
24297 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
24299 /* AVX */
24300 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
24301 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
24303 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
24304 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
24305 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
24306 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
24307 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
24309 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
24310 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
24311 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
24312 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
24313 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
24314 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
24315 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
24317 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
24318 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
24319 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
24321 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DI },
24322 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SI },
24323 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DI },
24324 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SI },
24325 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DI_V2DF },
24326 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SI_V4SF },
24327 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DI_V4DF },
24328 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SI_V8SF },
24330 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
24331 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
24332 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
24333 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
24334 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
24335 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
24337 /* FSGSBASE */
24338 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasesi, "__builtin_ia32_rdfsbase32", IX86_BUILTIN_RDFSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
24339 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdfsbasedi, "__builtin_ia32_rdfsbase64", IX86_BUILTIN_RDFSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
24340 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasesi, "__builtin_ia32_rdgsbase32", IX86_BUILTIN_RDGSBASE32, UNKNOWN, (int) UNSIGNED_FTYPE_VOID },
24341 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_rdgsbasedi, "__builtin_ia32_rdgsbase64", IX86_BUILTIN_RDGSBASE64, UNKNOWN, (int) UINT64_FTYPE_VOID },
24342 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasesi, "__builtin_ia32_wrfsbase32", IX86_BUILTIN_WRFSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
24343 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrfsbasedi, "__builtin_ia32_wrfsbase64", IX86_BUILTIN_WRFSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
24344 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasesi, "__builtin_ia32_wrgsbase32", IX86_BUILTIN_WRGSBASE32, UNKNOWN, (int) VOID_FTYPE_UNSIGNED },
24345 { OPTION_MASK_ISA_FSGSBASE | OPTION_MASK_ISA_64BIT, CODE_FOR_wrgsbasedi, "__builtin_ia32_wrgsbase64", IX86_BUILTIN_WRGSBASE64, UNKNOWN, (int) VOID_FTYPE_UINT64 },
24346 };
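/* "Special" builtins are the ones with side effects or memory
   operands, expanded through ix86_expand_special_args_builtin rather
   than as pure value operations.  E.g. the movntps entry above is
   what <xmmintrin.h> uses for the non-temporal store intrinsic
   (sketch):

     extern __inline void
     _mm_stream_ps (float *__P, __m128 __A)
     {
       __builtin_ia32_movntps (__P, (__v4sf) __A);
     }
*/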
24348 /* Builtins with variable number of arguments. */
24349 static const struct builtin_description bdesc_args[] =
24350 {
24351 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
24352 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
24353 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
24354 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
24355 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
24356 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
24357 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
24359 /* MMX */
24360 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24361 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24362 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24363 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24364 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24365 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24367 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24368 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24369 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24370 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24371 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24372 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24373 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24374 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24376 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24377 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24379 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24380 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24381 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24382 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24384 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24385 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24386 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24387 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24388 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24389 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24391 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24392 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24393 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24394 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24395 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24396 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
24398 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
24399 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
24400 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
24402 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
24404 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24405 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24406 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
24407 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24408 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24409 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
24411 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24412 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24413 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
24414 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24415 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24416 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
24418 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
24419 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
24420 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
24421 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
24423 /* 3DNow! */
24424 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
24425 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
24426 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24427 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24429 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24430 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24431 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24432 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24433 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24434 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
24435 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24436 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24437 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24438 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24439 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24440 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24441 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24442 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24443 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24445 /* 3DNow!A */
24446 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
24447 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
24448 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
24449 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
24450 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24451 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
24453 /* SSE */
24454 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
24455 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24456 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24457 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24458 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24459 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
24460 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
24461 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
24462 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
24463 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
24464 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
24465 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
24467 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
24469 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24470 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24471 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24472 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24473 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24474 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24475 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24476 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24478 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
24479 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
24480 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
24481 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24482 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24483 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24484 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
24485 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
24486 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
24487 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24488 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24489 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24490 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
24491 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
24492 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
24493 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24494 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
24495 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
24496 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
24497 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24498 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
24499 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
24501 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24502 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24503 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24504 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24506 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24507 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24508 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24509 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24511 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24513 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24514 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24515 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24516 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24517 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
24519 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
24520 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
24521 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
24523 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
24525 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
24526 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
24527 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
24529 /* SSE MMX or 3DNow!A */
24530 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24531 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24532 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24534 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24535 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24536 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
24537 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
24539 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
24540 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
24542 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
24544 /* SSE2 */
24545 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
24547 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
24548 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
24549 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
24550 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
24551 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
24552 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
24553 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
24554 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
24555 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
24556 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
24557 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
24558 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
24560 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
24561 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
24562 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
24563 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
24564 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
24565 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
24567 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
24568 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
24569 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
24570 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
24571 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
24573 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
24575 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
24576 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
24577 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
24578 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
24580 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
24581 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
24582 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
24584 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24585 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24586 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24587 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24588 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24589 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24590 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24591 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24593 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
24594 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
24595 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
24596 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
24597 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
24598 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
24599 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
24600 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
24601 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
24602 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
24603 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
24604 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
24605 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
24606 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
24607 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
24608 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
24609 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
24610 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
24611 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
24612 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
24614 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24615 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24616 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24617 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24619 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24620 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24621 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24622 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24624 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24626 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24627 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24628 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
24630 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
24632 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24633 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24634 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
24635 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
24636 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24637 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24638 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
24639 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
24641 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24642 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24643 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24644 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24645 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24646 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24647 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24648 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24650 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24651 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24653 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
24654 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
24655 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
24656 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
24658 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24659 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24661 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24662 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24663 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
24664 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24665 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24666 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
24668 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24669 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24670 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24671 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24673 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24674 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24675 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
24676 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
24677 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
24678 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
24679 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
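
  /* SSE2 MMX: V1DI operations on the MMX register set that require SSE2.  */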
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
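
  /* SSE3 */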
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
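
  /* SSSE3 */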
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
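
  /* SSE4.1 */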
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_sign_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
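
  /* SSE4.1 rounding and packed-test builtins, guarded by
     OPTION_MASK_ISA_ROUND.  */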
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_floorpd", IX86_BUILTIN_FLOORPD, (enum rtx_code) ROUND_FLOOR, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_ceilpd", IX86_BUILTIN_CEILPD, (enum rtx_code) ROUND_CEIL, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_truncpd", IX86_BUILTIN_TRUNCPD, (enum rtx_code) ROUND_TRUNC, (int) V2DF_FTYPE_V2DF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_rintpd", IX86_BUILTIN_RINTPD, (enum rtx_code) ROUND_MXCSR, (int) V2DF_FTYPE_V2DF_ROUND },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_floorps", IX86_BUILTIN_FLOORPS, (enum rtx_code) ROUND_FLOOR, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_ceilps", IX86_BUILTIN_CEILPS, (enum rtx_code) ROUND_CEIL, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_truncps", IX86_BUILTIN_TRUNCPS, (enum rtx_code) ROUND_TRUNC, (int) V4SF_FTYPE_V4SF_ROUND },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_rintps", IX86_BUILTIN_RINTPS, (enum rtx_code) ROUND_MXCSR, (int) V4SF_FTYPE_V4SF_ROUND },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
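
  /* SSE4.2 */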
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
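
  /* SSE4A */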
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
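
  /* AES */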
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
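
  /* PCLMUL */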
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
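
  /* AVX */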
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vmcmpv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_floorpd256", IX86_BUILTIN_FLOORPD256, (enum rtx_code) ROUND_FLOOR, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_ceilpd256", IX86_BUILTIN_CEILPD256, (enum rtx_code) ROUND_CEIL, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_truncpd256", IX86_BUILTIN_TRUNCPD256, (enum rtx_code) ROUND_TRUNC, (int) V4DF_FTYPE_V4DF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_rintpd256", IX86_BUILTIN_RINTPD256, (enum rtx_code) ROUND_MXCSR, (int) V4DF_FTYPE_V4DF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_floorps256", IX86_BUILTIN_FLOORPS256, (enum rtx_code) ROUND_FLOOR, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_ceilps256", IX86_BUILTIN_CEILPS256, (enum rtx_code) ROUND_CEIL, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_truncps256", IX86_BUILTIN_TRUNCPS256, (enum rtx_code) ROUND_TRUNC, (int) V8SF_FTYPE_V8SF_ROUND },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_rintps256", IX86_BUILTIN_RINTPS256, (enum rtx_code) ROUND_MXCSR, (int) V8SF_FTYPE_V8SF_ROUND },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv8sf3, "__builtin_ia32_copysignps256", IX86_BUILTIN_CPYSGNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_copysignv4df3, "__builtin_ia32_copysignpd256", IX86_BUILTIN_CPYSGNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },

  { OPTION_MASK_ISA_LZCNT, CODE_FOR_clzhi2_lzcnt, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
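
  /* BMI */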
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_si, "__builtin_ia32_bextr_u32", IX86_BUILTIN_BEXTR32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_BMI, CODE_FOR_bmi_bextr_di, "__builtin_ia32_bextr_u64", IX86_BUILTIN_BEXTR64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
  { OPTION_MASK_ISA_BMI, CODE_FOR_ctzhi2, "__builtin_ctzs", IX86_BUILTIN_CTZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
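
  /* TBM */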
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_si, "__builtin_ia32_bextri_u32", IX86_BUILTIN_BEXTRI32, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_TBM, CODE_FOR_tbm_bextri_di, "__builtin_ia32_bextri_u64", IX86_BUILTIN_BEXTRI64, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
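
  /* F16C */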
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps, "__builtin_ia32_vcvtph2ps", IX86_BUILTIN_CVTPH2PS, UNKNOWN, (int) V4SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtph2ps256, "__builtin_ia32_vcvtph2ps256", IX86_BUILTIN_CVTPH2PS256, UNKNOWN, (int) V8SF_FTYPE_V8HI },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph, "__builtin_ia32_vcvtps2ph", IX86_BUILTIN_CVTPS2PH, UNKNOWN, (int) V8HI_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_F16C, CODE_FOR_vcvtps2ph256, "__builtin_ia32_vcvtps2ph256", IX86_BUILTIN_CVTPS2PH256, UNKNOWN, (int) V8HI_FTYPE_V8SF_INT },
};

/* FMA4 and XOP. */
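/* Shorthand aliases for the function-type enumerators used by
   bdesc_multi_arg below.  The suffix describes the operand shape:
   e.g. MULTI_ARG_3_SF is a three-operand V4SF operation and
   MULTI_ARG_3_SF2 its 256-bit V8SF counterpart; the _IMM variants take
   an integer count, while the _CMP and _TF variants carry a comparison
   code in the rtx_code field of the table entry.  */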
#define MULTI_ARG_4_DF2_DI_I	V2DF_FTYPE_V2DF_V2DF_V2DI_INT
#define MULTI_ARG_4_DF2_DI_I1	V4DF_FTYPE_V4DF_V4DF_V4DI_INT
#define MULTI_ARG_4_SF2_SI_I	V4SF_FTYPE_V4SF_V4SF_V4SI_INT
#define MULTI_ARG_4_SF2_SI_I1	V8SF_FTYPE_V8SF_V8SF_V8SI_INT
#define MULTI_ARG_3_SF		V4SF_FTYPE_V4SF_V4SF_V4SF
#define MULTI_ARG_3_DF		V2DF_FTYPE_V2DF_V2DF_V2DF
#define MULTI_ARG_3_SF2		V8SF_FTYPE_V8SF_V8SF_V8SF
#define MULTI_ARG_3_DF2		V4DF_FTYPE_V4DF_V4DF_V4DF
#define MULTI_ARG_3_DI		V2DI_FTYPE_V2DI_V2DI_V2DI
#define MULTI_ARG_3_SI		V4SI_FTYPE_V4SI_V4SI_V4SI
#define MULTI_ARG_3_SI_DI	V4SI_FTYPE_V4SI_V4SI_V2DI
#define MULTI_ARG_3_HI		V8HI_FTYPE_V8HI_V8HI_V8HI
#define MULTI_ARG_3_HI_SI	V8HI_FTYPE_V8HI_V8HI_V4SI
#define MULTI_ARG_3_QI		V16QI_FTYPE_V16QI_V16QI_V16QI
#define MULTI_ARG_3_DI2		V4DI_FTYPE_V4DI_V4DI_V4DI
#define MULTI_ARG_3_SI2		V8SI_FTYPE_V8SI_V8SI_V8SI
#define MULTI_ARG_3_HI2		V16HI_FTYPE_V16HI_V16HI_V16HI
#define MULTI_ARG_3_QI2		V32QI_FTYPE_V32QI_V32QI_V32QI
#define MULTI_ARG_2_SF		V4SF_FTYPE_V4SF_V4SF
#define MULTI_ARG_2_DF		V2DF_FTYPE_V2DF_V2DF
#define MULTI_ARG_2_DI		V2DI_FTYPE_V2DI_V2DI
#define MULTI_ARG_2_SI		V4SI_FTYPE_V4SI_V4SI
#define MULTI_ARG_2_HI		V8HI_FTYPE_V8HI_V8HI
#define MULTI_ARG_2_QI		V16QI_FTYPE_V16QI_V16QI
#define MULTI_ARG_2_DI_IMM	V2DI_FTYPE_V2DI_SI
#define MULTI_ARG_2_SI_IMM	V4SI_FTYPE_V4SI_SI
#define MULTI_ARG_2_HI_IMM	V8HI_FTYPE_V8HI_SI
#define MULTI_ARG_2_QI_IMM	V16QI_FTYPE_V16QI_SI
#define MULTI_ARG_2_DI_CMP	V2DI_FTYPE_V2DI_V2DI_CMP
#define MULTI_ARG_2_SI_CMP	V4SI_FTYPE_V4SI_V4SI_CMP
#define MULTI_ARG_2_HI_CMP	V8HI_FTYPE_V8HI_V8HI_CMP
#define MULTI_ARG_2_QI_CMP	V16QI_FTYPE_V16QI_V16QI_CMP
#define MULTI_ARG_2_SF_TF	V4SF_FTYPE_V4SF_V4SF_TF
#define MULTI_ARG_2_DF_TF	V2DF_FTYPE_V2DF_V2DF_TF
#define MULTI_ARG_2_DI_TF	V2DI_FTYPE_V2DI_V2DI_TF
#define MULTI_ARG_2_SI_TF	V4SI_FTYPE_V4SI_V4SI_TF
#define MULTI_ARG_2_HI_TF	V8HI_FTYPE_V8HI_V8HI_TF
#define MULTI_ARG_2_QI_TF	V16QI_FTYPE_V16QI_V16QI_TF
#define MULTI_ARG_1_SF		V4SF_FTYPE_V4SF
#define MULTI_ARG_1_DF		V2DF_FTYPE_V2DF
#define MULTI_ARG_1_SF2		V8SF_FTYPE_V8SF
#define MULTI_ARG_1_DF2		V4DF_FTYPE_V4DF
#define MULTI_ARG_1_DI		V2DI_FTYPE_V2DI
#define MULTI_ARG_1_SI		V4SI_FTYPE_V4SI
#define MULTI_ARG_1_HI		V8HI_FTYPE_V8HI
#define MULTI_ARG_1_QI		V16QI_FTYPE_V16QI
#define MULTI_ARG_1_SI_DI	V2DI_FTYPE_V4SI
#define MULTI_ARG_1_HI_DI	V2DI_FTYPE_V8HI
#define MULTI_ARG_1_HI_SI	V4SI_FTYPE_V8HI
#define MULTI_ARG_1_QI_DI	V2DI_FTYPE_V16QI
#define MULTI_ARG_1_QI_SI	V4SI_FTYPE_V16QI
#define MULTI_ARG_1_QI_HI	V8HI_FTYPE_V16QI
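
/* Each entry below supplies the ISA mask required to enable the builtin,
   the insn pattern used to expand it, its user-visible name, the
   IX86_BUILTIN_* code, an optional comparison code, and its function
   type, encoded with the MULTI_ARG_* aliases above.  */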
static const struct builtin_description bdesc_multi_arg[] =
{
25068 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v4sf,
25069 "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS,
25070 UNKNOWN, (int)MULTI_ARG_3_SF },
25071 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmadd_v2df,
25072 "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD,
25073 UNKNOWN, (int)MULTI_ARG_3_DF },
25075 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4sf,
25076 "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS,
25077 UNKNOWN, (int)MULTI_ARG_3_SF },
25078 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v2df,
25079 "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD,
25080 UNKNOWN, (int)MULTI_ARG_3_DF },
25081 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v8sf,
25082 "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256,
25083 UNKNOWN, (int)MULTI_ARG_3_SF2 },
25084 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmadd_v4df,
25085 "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256,
25086 UNKNOWN, (int)MULTI_ARG_3_DF2 },
25088 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4sf,
25089 "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS,
25090 UNKNOWN, (int)MULTI_ARG_3_SF },
25091 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v2df,
25092 "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD,
25093 UNKNOWN, (int)MULTI_ARG_3_DF },
25094 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v8sf,
25095 "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256,
25096 UNKNOWN, (int)MULTI_ARG_3_SF2 },
25097 { OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_FMA4, CODE_FOR_fmaddsub_v4df,
25098 "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256,
25099 UNKNOWN, (int)MULTI_ARG_3_DF2 },
25101 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
25102 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
25103 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
25104 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
25105 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi",IX86_BUILTIN_VPCMOV_V16QI,UNKNOWN, (int)MULTI_ARG_3_QI },
25106 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
25107 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
25109 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
25110 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
25111 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
25112 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
25113 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
25114 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
25115 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
25117 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
25119 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
25120 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
25121 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25122 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25123 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
25124 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
25125 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25126 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25127 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25128 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
25129 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25130 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
25132 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
25133 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
25134 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
25135 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
25136 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
25137 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
25138 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
25139 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
25140 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
25141 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
25142 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
25143 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
25144 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
25145 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
25146 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
25147 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
25149 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
25150 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
25151 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
25152 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
25153 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
25154 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
25156 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
25157 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
25158 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
25159 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
25160 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
25161 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
25162 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
25163 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
25164 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
25165 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
25166 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
25167 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
25168 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
25169 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
25170 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
25172 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
25173 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
25174 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
25175 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
25176 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
25177 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
25178 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
25180 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
25181 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
25182 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
25183 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
25184 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
25185 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
25186 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
25188 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
25189 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
25190 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
25191 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
25192 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
25193 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
25194 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
25196 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
25197 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
25198 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
25199 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
25200 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
25201 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
25202 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
25204 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
25205 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
25206 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
25207 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
25208 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
25209 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
25210 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
25212 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
25213 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
25214 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
25215 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
25216 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
25217 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
25218 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
25220 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
25221 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
25222 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
25223 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
25224 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
25225 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
25226 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
25228 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
25229 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
25230 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
25231 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
25232 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
25233 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
25234 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
25236 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
25237 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
25238 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
25239 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
25240 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
25241 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
25242 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
25243 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
25245 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
25246 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
25247 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
25248 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
25249 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
25250 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
25251 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
25252 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
25254 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
25255 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
25256 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
25257 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
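/* A hedged usage sketch (illustrative, not part of the table): each entry
   above binds a builtin name to an insn pattern plus an rtx comparison
   code.  Assuming the usual <xopintrin.h> wrapper names, the VPCOMLTUB
   entry is reached as:

       #include <xopintrin.h>

       __m128i
       lt_u8 (__m128i a, __m128i b)
       {
         return _mm_comlt_epu8 (a, b);
       }

   which the front end lowers to __builtin_ia32_vpcomltub, and which this
   table maps to CODE_FOR_xop_maskcmp_unsv16qi3 with comparison LTU.  */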
25261 /* Set up all the MMX/SSE builtins, even builtins for instructions that are not
25262 in the current target ISA to allow the user to compile particular modules
25263 with different target specific options that differ from the command line options.  */
25266 ix86_init_mmx_sse_builtins (void)
25268 const struct builtin_description * d;
25269 enum ix86_builtin_func_type ftype;
25272 /* Add all special builtins with a variable number of operands.  */
25273 for (i = 0, d = bdesc_special_args;
25274 i < ARRAY_SIZE (bdesc_special_args);
25280 ftype = (enum ix86_builtin_func_type) d->flag;
25281 def_builtin (d->mask, d->name, ftype, d->code);
25284 /* Add all builtins with a variable number of operands.  */
25285 for (i = 0, d = bdesc_args;
25286 i < ARRAY_SIZE (bdesc_args);
25292 ftype = (enum ix86_builtin_func_type) d->flag;
25293 def_builtin_const (d->mask, d->name, ftype, d->code);
25296 /* pcmpestr[im] insns. */
25297 for (i = 0, d = bdesc_pcmpestr;
25298 i < ARRAY_SIZE (bdesc_pcmpestr);
25301 if (d->code == IX86_BUILTIN_PCMPESTRM128)
25302 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
25304 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
25305 def_builtin_const (d->mask, d->name, ftype, d->code);
25308 /* pcmpistr[im] insns. */
25309 for (i = 0, d = bdesc_pcmpistr;
25310 i < ARRAY_SIZE (bdesc_pcmpistr);
25313 if (d->code == IX86_BUILTIN_PCMPISTRM128)
25314 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
25316 ftype = INT_FTYPE_V16QI_V16QI_INT;
25317 def_builtin_const (d->mask, d->name, ftype, d->code);
25320 /* comi/ucomi insns. */
25321 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
25323 if (d->mask == OPTION_MASK_ISA_SSE2)
25324 ftype = INT_FTYPE_V2DF_V2DF;
25326 ftype = INT_FTYPE_V4SF_V4SF;
25327 def_builtin_const (d->mask, d->name, ftype, d->code);
25331 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
25332 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
25333 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
25334 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
25336 /* SSE or 3DNow!A */
25337 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25338 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
25339 IX86_BUILTIN_MASKMOVQ);
25342 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
25343 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
25345 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
25346 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
25347 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
25348 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
25351 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
25352 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
25353 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
25354 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
25357 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
25358 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
25359 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
25360 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
25361 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
25362 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
25363 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
25364 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
25365 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
25366 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
25367 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
25368 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
25371 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
25372 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
25375 def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand16_step",
25376 INT_FTYPE_PUSHORT, IX86_BUILTIN_RDRAND16_STEP);
25377 def_builtin (OPTION_MASK_ISA_RDRND, "__builtin_ia32_rdrand32_step",
25378 INT_FTYPE_PUNSIGNED, IX86_BUILTIN_RDRAND32_STEP);
25379 def_builtin (OPTION_MASK_ISA_RDRND | OPTION_MASK_ISA_64BIT,
25380 "__builtin_ia32_rdrand64_step", INT_FTYPE_PULONGLONG,
25381 IX86_BUILTIN_RDRAND64_STEP);
25383 /* MMX access to the vec_init patterns. */
25384 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
25385 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
25387 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
25388 V4HI_FTYPE_HI_HI_HI_HI,
25389 IX86_BUILTIN_VEC_INIT_V4HI);
25391 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
25392 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
25393 IX86_BUILTIN_VEC_INIT_V8QI);
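/* A minimal usage sketch for the vec_init builtins above (illustrative;
   assumes -mmmx).  Building a V2SI from two scalars:

       #include <mmintrin.h>

       __m64
       make_pair (int a, int b)
       {
         return (__m64) __builtin_ia32_vec_init_v2si (a, b);
       }

   User code normally reaches this builtin through wrappers such as
   _mm_set_pi32 rather than calling it directly.  */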
25395 /* Access to the vec_extract patterns. */
25396 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
25397 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
25398 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
25399 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
25400 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
25401 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
25402 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
25403 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
25404 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
25405 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
25407 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25408 "__builtin_ia32_vec_ext_v4hi",
25409 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
25411 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
25412 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
25414 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
25415 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
25417 /* Access to the vec_set patterns. */
25418 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
25419 "__builtin_ia32_vec_set_v2di",
25420 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
25422 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
25423 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
25425 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
25426 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
25428 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
25429 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
25431 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
25432 "__builtin_ia32_vec_set_v4hi",
25433 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
25435 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
25436 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
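/* A hedged sketch of how the vec_ext/vec_set builtins above pair up
   (assumes SSE2 for the extract and SSE4.1 for the set).  Replacing
   element 2 of a V4SI vector and reading it back:

       typedef int v4si __attribute__ ((vector_size (16)));

       v4si
       put2 (v4si v, int x)
       {
         return __builtin_ia32_vec_set_v4si (v, x, 2);
       }

       int
       get2 (v4si v)
       {
         return __builtin_ia32_vec_ext_v4si (v, 2);
       }
*/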
25438 /* Add FMA4 multi-argument instructions.  */
25439 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
25444 ftype = (enum ix86_builtin_func_type) d->flag;
25445 def_builtin_const (d->mask, d->name, ftype, d->code);
25449 /* Internal method for ix86_init_builtins. */
25452 ix86_init_builtins_va_builtins_abi (void)
25454 tree ms_va_ref, sysv_va_ref;
25455 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
25456 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
25457 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
25458 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
25462 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
25463 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
25464 ms_va_ref = build_reference_type (ms_va_list_type_node);
25466 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
25469 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
25470 fnvoid_va_start_ms =
25471 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
25472 fnvoid_va_end_sysv =
25473 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
25474 fnvoid_va_start_sysv =
25475 build_varargs_function_type_list (void_type_node, sysv_va_ref,
25477 fnvoid_va_copy_ms =
25478 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
25480 fnvoid_va_copy_sysv =
25481 build_function_type_list (void_type_node, sysv_va_ref,
25482 sysv_va_ref, NULL_TREE);
25484 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
25485 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
25486 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
25487 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
25488 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
25489 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
25490 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
25491 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
25492 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
25493 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
25494 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
25495 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
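/* A hedged usage sketch for the ABI-specific varargs builtins registered
   above (illustrative; these are 64-bit only).  An ms_abi variadic
   function walks its arguments with the __builtin_ms_va_* family:

       int __attribute__ ((ms_abi))
       first_arg (int n, ...)
       {
         __builtin_ms_va_list ap;
         int v;

         __builtin_ms_va_start (ap, n);
         v = __builtin_va_arg (ap, int);
         __builtin_ms_va_end (ap);
         return v;
       }
*/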
25499 ix86_init_builtin_types (void)
25501 tree float128_type_node, float80_type_node;
25503 /* The __float80 type. */
25504 float80_type_node = long_double_type_node;
25505 if (TYPE_MODE (float80_type_node) != XFmode)
25507 /* The __float80 type. */
25508 float80_type_node = make_node (REAL_TYPE);
25510 TYPE_PRECISION (float80_type_node) = 80;
25511 layout_type (float80_type_node);
25513 lang_hooks.types.register_builtin_type (float80_type_node, "__float80");
25515 /* The __float128 type. */
25516 float128_type_node = make_node (REAL_TYPE);
25517 TYPE_PRECISION (float128_type_node) = 128;
25518 layout_type (float128_type_node);
25519 lang_hooks.types.register_builtin_type (float128_type_node, "__float128");
25521 /* This macro is built by i386-builtin-types.awk. */
25522 DEFINE_BUILTIN_PRIMITIVE_TYPES;
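/* Once registered above, both float types are directly usable from C;
   a hedged sketch (the 'w' and 'q' literal suffixes are GNU extensions
   on this target):

       __float80  e = 1.5w;      80-bit extended precision, XFmode
       __float128 q = 1.5q;      128-bit quad precision, TFmode
*/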
25526 ix86_init_builtins (void)
25530 ix86_init_builtin_types ();
25532 /* TFmode support builtins. */
25533 def_builtin_const (0, "__builtin_infq",
25534 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
25535 def_builtin_const (0, "__builtin_huge_valq",
25536 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
25538 /* We will expand them to a normal call if SSE2 isn't available since
25539 they are used by libgcc. */
25540 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
25541 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
25542 BUILT_IN_MD, "__fabstf2", NULL_TREE);
25543 TREE_READONLY (t) = 1;
25544 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
25546 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
25547 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
25548 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
25549 TREE_READONLY (t) = 1;
25550 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
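/* A hedged usage sketch for the two TFmode builtins just installed; when
   SSE2 is unavailable they expand to calls to the libgcc routines named
   above (__fabstf2 and __copysigntf3):

       __float128
       magnitude_with_sign (__float128 x, __float128 s)
       {
         return __builtin_copysignq (__builtin_fabsq (x), s);
       }
*/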
25552 ix86_init_mmx_sse_builtins ();
25555 ix86_init_builtins_va_builtins_abi ();
25557 #ifdef SUBTARGET_INIT_BUILTINS
25558 SUBTARGET_INIT_BUILTINS;
25562 /* Return the ix86 builtin for CODE. */
25565 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
25567 if (code >= IX86_BUILTIN_MAX)
25568 return error_mark_node;
25570 return ix86_builtins[code];
25573 /* Errors in the source file can cause expand_expr to return const0_rtx
25574 where we expect a vector. To avoid crashing, use one of the vector
25575 clear instructions. */
25577 safe_vector_operand (rtx x, enum machine_mode mode)
25579 if (x == const0_rtx)
25580 x = CONST0_RTX (mode);
25584 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
25587 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
25590 tree arg0 = CALL_EXPR_ARG (exp, 0);
25591 tree arg1 = CALL_EXPR_ARG (exp, 1);
25592 rtx op0 = expand_normal (arg0);
25593 rtx op1 = expand_normal (arg1);
25594 enum machine_mode tmode = insn_data[icode].operand[0].mode;
25595 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
25596 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
25598 if (VECTOR_MODE_P (mode0))
25599 op0 = safe_vector_operand (op0, mode0);
25600 if (VECTOR_MODE_P (mode1))
25601 op1 = safe_vector_operand (op1, mode1);
25603 if (optimize || !target
25604 || GET_MODE (target) != tmode
25605 || !insn_data[icode].operand[0].predicate (target, tmode))
25606 target = gen_reg_rtx (tmode);
25608 if (GET_MODE (op1) == SImode && mode1 == TImode)
25610 rtx x = gen_reg_rtx (V4SImode);
25611 emit_insn (gen_sse2_loadd (x, op1));
25612 op1 = gen_lowpart (TImode, x);
25615 if (!insn_data[icode].operand[1].predicate (op0, mode0))
25616 op0 = copy_to_mode_reg (mode0, op0);
25617 if (!insn_data[icode].operand[2].predicate (op1, mode1))
25618 op1 = copy_to_mode_reg (mode1, op1);
25620 pat = GEN_FCN (icode) (target, op0, op1);
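  /* Illustrative trace of the call above (an assumed example, not a
     separate code path): for __builtin_ia32_paddw128 (a, b) the icode is
     CODE_FOR_addv8hi3, so GEN_FCN emits the equivalent of

         pat = gen_addv8hi3 (target, op0, op1);

     i.e. a single V8HImode addition feeding TARGET.  */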
25629 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
25632 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
25633 enum ix86_builtin_func_type m_type,
25634 enum rtx_code sub_code)
25639 bool comparison_p = false;
25641 bool last_arg_constant = false;
25642 int num_memory = 0;
25645 enum machine_mode mode;
25648 enum machine_mode tmode = insn_data[icode].operand[0].mode;
25652 case MULTI_ARG_4_DF2_DI_I:
25653 case MULTI_ARG_4_DF2_DI_I1:
25654 case MULTI_ARG_4_SF2_SI_I:
25655 case MULTI_ARG_4_SF2_SI_I1:
25657 last_arg_constant = true;
25660 case MULTI_ARG_3_SF:
25661 case MULTI_ARG_3_DF:
25662 case MULTI_ARG_3_SF2:
25663 case MULTI_ARG_3_DF2:
25664 case MULTI_ARG_3_DI:
25665 case MULTI_ARG_3_SI:
25666 case MULTI_ARG_3_SI_DI:
25667 case MULTI_ARG_3_HI:
25668 case MULTI_ARG_3_HI_SI:
25669 case MULTI_ARG_3_QI:
25670 case MULTI_ARG_3_DI2:
25671 case MULTI_ARG_3_SI2:
25672 case MULTI_ARG_3_HI2:
25673 case MULTI_ARG_3_QI2:
25677 case MULTI_ARG_2_SF:
25678 case MULTI_ARG_2_DF:
25679 case MULTI_ARG_2_DI:
25680 case MULTI_ARG_2_SI:
25681 case MULTI_ARG_2_HI:
25682 case MULTI_ARG_2_QI:
25686 case MULTI_ARG_2_DI_IMM:
25687 case MULTI_ARG_2_SI_IMM:
25688 case MULTI_ARG_2_HI_IMM:
25689 case MULTI_ARG_2_QI_IMM:
25691 last_arg_constant = true;
25694 case MULTI_ARG_1_SF:
25695 case MULTI_ARG_1_DF:
25696 case MULTI_ARG_1_SF2:
25697 case MULTI_ARG_1_DF2:
25698 case MULTI_ARG_1_DI:
25699 case MULTI_ARG_1_SI:
25700 case MULTI_ARG_1_HI:
25701 case MULTI_ARG_1_QI:
25702 case MULTI_ARG_1_SI_DI:
25703 case MULTI_ARG_1_HI_DI:
25704 case MULTI_ARG_1_HI_SI:
25705 case MULTI_ARG_1_QI_DI:
25706 case MULTI_ARG_1_QI_SI:
25707 case MULTI_ARG_1_QI_HI:
25711 case MULTI_ARG_2_DI_CMP:
25712 case MULTI_ARG_2_SI_CMP:
25713 case MULTI_ARG_2_HI_CMP:
25714 case MULTI_ARG_2_QI_CMP:
25716 comparison_p = true;
25719 case MULTI_ARG_2_SF_TF:
25720 case MULTI_ARG_2_DF_TF:
25721 case MULTI_ARG_2_DI_TF:
25722 case MULTI_ARG_2_SI_TF:
25723 case MULTI_ARG_2_HI_TF:
25724 case MULTI_ARG_2_QI_TF:
25730 gcc_unreachable ();
25733 if (optimize || !target
25734 || GET_MODE (target) != tmode
25735 || !insn_data[icode].operand[0].predicate (target, tmode))
25736 target = gen_reg_rtx (tmode);
25738 gcc_assert (nargs <= 4);
25740 for (i = 0; i < nargs; i++)
25742 tree arg = CALL_EXPR_ARG (exp, i);
25743 rtx op = expand_normal (arg);
25744 int adjust = (comparison_p) ? 1 : 0;
25745 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
25747 if (last_arg_constant && i == nargs - 1)
25749 if (!insn_data[icode].operand[i + 1].predicate (op, mode))
25751 enum insn_code new_icode = icode;
25754 case CODE_FOR_xop_vpermil2v2df3:
25755 case CODE_FOR_xop_vpermil2v4sf3:
25756 case CODE_FOR_xop_vpermil2v4df3:
25757 case CODE_FOR_xop_vpermil2v8sf3:
25758 error ("the last argument must be a 2-bit immediate");
25759 return gen_reg_rtx (tmode);
25760 case CODE_FOR_xop_rotlv2di3:
25761 new_icode = CODE_FOR_rotlv2di3;
25763 case CODE_FOR_xop_rotlv4si3:
25764 new_icode = CODE_FOR_rotlv4si3;
25766 case CODE_FOR_xop_rotlv8hi3:
25767 new_icode = CODE_FOR_rotlv8hi3;
25769 case CODE_FOR_xop_rotlv16qi3:
25770 new_icode = CODE_FOR_rotlv16qi3;
25772 if (CONST_INT_P (op))
25774 int mask = GET_MODE_BITSIZE (GET_MODE_INNER (tmode)) - 1;
25775 op = GEN_INT (INTVAL (op) & mask);
25776 gcc_checking_assert
25777 (insn_data[icode].operand[i + 1].predicate (op, mode));
25781 gcc_checking_assert
25783 && insn_data[new_icode].operand[0].mode == tmode
25784 && insn_data[new_icode].operand[1].mode == tmode
25785 && insn_data[new_icode].operand[2].mode == mode
25786 && insn_data[new_icode].operand[0].predicate
25787 == insn_data[icode].operand[0].predicate
25788 && insn_data[new_icode].operand[1].predicate
25789 == insn_data[icode].operand[1].predicate);
25795 gcc_unreachable ();
25802 if (VECTOR_MODE_P (mode))
25803 op = safe_vector_operand (op, mode);
25805 /* If we aren't optimizing, only allow one memory operand to be generated.  */
25807 if (memory_operand (op, mode))
25810 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
25813 || !insn_data[icode].operand[i+adjust+1].predicate (op, mode)
25815 op = force_reg (mode, op);
25819 args[i].mode = mode;
25825 pat = GEN_FCN (icode) (target, args[0].op);
25830 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
25831 GEN_INT ((int)sub_code));
25832 else if (! comparison_p)
25833 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
25836 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
25840 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
25845 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
25849 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
25853 gcc_unreachable ();
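/* Worked example for the rotate-count handling earlier in this function
   (illustrative; assumes the usual <xopintrin.h> wrapper name): for
   CODE_FOR_xop_rotlv4si3 the inner mode is SImode, so the mask is 31 and
   a constant count is reduced modulo 32:

       _mm_roti_epi32 (x, 35)    behaves as    _mm_roti_epi32 (x, 3)

   while a non-constant count falls back to the generic rotlv4si3 pattern
   validated by the gcc_checking_assert above.  */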
25863 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
25864 insns with vec_merge. */
25867 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
25871 tree arg0 = CALL_EXPR_ARG (exp, 0);
25872 rtx op1, op0 = expand_normal (arg0);
25873 enum machine_mode tmode = insn_data[icode].operand[0].mode;
25874 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
25876 if (optimize || !target
25877 || GET_MODE (target) != tmode
25878 || !insn_data[icode].operand[0].predicate (target, tmode))
25879 target = gen_reg_rtx (tmode);
25881 if (VECTOR_MODE_P (mode0))
25882 op0 = safe_vector_operand (op0, mode0);
25884 if ((optimize && !register_operand (op0, mode0))
25885 || !insn_data[icode].operand[1].predicate (op0, mode0))
25886 op0 = copy_to_mode_reg (mode0, op0);
25889 if (!insn_data[icode].operand[2].predicate (op1, mode0))
25890 op1 = copy_to_mode_reg (mode0, op1);
25892 pat = GEN_FCN (icode) (target, op0, op1);
25899 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
25902 ix86_expand_sse_compare (const struct builtin_description *d,
25903 tree exp, rtx target, bool swap)
25906 tree arg0 = CALL_EXPR_ARG (exp, 0);
25907 tree arg1 = CALL_EXPR_ARG (exp, 1);
25908 rtx op0 = expand_normal (arg0);
25909 rtx op1 = expand_normal (arg1);
25911 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
25912 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
25913 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
25914 enum rtx_code comparison = d->comparison;
25916 if (VECTOR_MODE_P (mode0))
25917 op0 = safe_vector_operand (op0, mode0);
25918 if (VECTOR_MODE_P (mode1))
25919 op1 = safe_vector_operand (op1, mode1);
25921 /* Swap operands if we have a comparison that isn't available in hardware.  */
25925 rtx tmp = gen_reg_rtx (mode1);
25926 emit_move_insn (tmp, op1);
25931 if (optimize || !target
25932 || GET_MODE (target) != tmode
25933 || !insn_data[d->icode].operand[0].predicate (target, tmode))
25934 target = gen_reg_rtx (tmode);
25936 if ((optimize && !register_operand (op0, mode0))
25937 || !insn_data[d->icode].operand[1].predicate (op0, mode0))
25938 op0 = copy_to_mode_reg (mode0, op0);
25939 if ((optimize && !register_operand (op1, mode1))
25940 || !insn_data[d->icode].operand[2].predicate (op1, mode1))
25941 op1 = copy_to_mode_reg (mode1, op1);
25943 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
25944 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
25951 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
25954 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
25958 tree arg0 = CALL_EXPR_ARG (exp, 0);
25959 tree arg1 = CALL_EXPR_ARG (exp, 1);
25960 rtx op0 = expand_normal (arg0);
25961 rtx op1 = expand_normal (arg1);
25962 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
25963 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
25964 enum rtx_code comparison = d->comparison;
25966 if (VECTOR_MODE_P (mode0))
25967 op0 = safe_vector_operand (op0, mode0);
25968 if (VECTOR_MODE_P (mode1))
25969 op1 = safe_vector_operand (op1, mode1);
25971 /* Swap operands if we have a comparison that isn't available in hardware.  */
25973 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
25980 target = gen_reg_rtx (SImode);
25981 emit_move_insn (target, const0_rtx);
25982 target = gen_rtx_SUBREG (QImode, target, 0);
25984 if ((optimize && !register_operand (op0, mode0))
25985 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
25986 op0 = copy_to_mode_reg (mode0, op0);
25987 if ((optimize && !register_operand (op1, mode1))
25988 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
25989 op1 = copy_to_mode_reg (mode1, op1);
25991 pat = GEN_FCN (d->icode) (op0, op1);
25995 emit_insn (gen_rtx_SET (VOIDmode,
25996 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
25997 gen_rtx_fmt_ee (comparison, QImode,
26001 return SUBREG_REG (target);
26004 /* Subroutine of ix86_expand_args_builtin to take care of round insns. */
26007 ix86_expand_sse_round (const struct builtin_description *d, tree exp,
26011 tree arg0 = CALL_EXPR_ARG (exp, 0);
26012 rtx op1, op0 = expand_normal (arg0);
26013 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
26014 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
26016 if (optimize || target == 0
26017 || GET_MODE (target) != tmode
26018 || !insn_data[d->icode].operand[0].predicate (target, tmode))
26019 target = gen_reg_rtx (tmode);
26021 if (VECTOR_MODE_P (mode0))
26022 op0 = safe_vector_operand (op0, mode0);
26024 if ((optimize && !register_operand (op0, mode0))
26025 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26026 op0 = copy_to_mode_reg (mode0, op0);
26028 op1 = GEN_INT (d->comparison);
26030 pat = GEN_FCN (d->icode) (target, op0, op1);
26037 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
26040 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
26044 tree arg0 = CALL_EXPR_ARG (exp, 0);
26045 tree arg1 = CALL_EXPR_ARG (exp, 1);
26046 rtx op0 = expand_normal (arg0);
26047 rtx op1 = expand_normal (arg1);
26048 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
26049 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
26050 enum rtx_code comparison = d->comparison;
26052 if (VECTOR_MODE_P (mode0))
26053 op0 = safe_vector_operand (op0, mode0);
26054 if (VECTOR_MODE_P (mode1))
26055 op1 = safe_vector_operand (op1, mode1);
26057 target = gen_reg_rtx (SImode);
26058 emit_move_insn (target, const0_rtx);
26059 target = gen_rtx_SUBREG (QImode, target, 0);
26061 if ((optimize && !register_operand (op0, mode0))
26062 || !insn_data[d->icode].operand[0].predicate (op0, mode0))
26063 op0 = copy_to_mode_reg (mode0, op0);
26064 if ((optimize && !register_operand (op1, mode1))
26065 || !insn_data[d->icode].operand[1].predicate (op1, mode1))
26066 op1 = copy_to_mode_reg (mode1, op1);
26068 pat = GEN_FCN (d->icode) (op0, op1);
26072 emit_insn (gen_rtx_SET (VOIDmode,
26073 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26074 gen_rtx_fmt_ee (comparison, QImode,
26078 return SUBREG_REG (target);
26081 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
26084 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
26085 tree exp, rtx target)
26088 tree arg0 = CALL_EXPR_ARG (exp, 0);
26089 tree arg1 = CALL_EXPR_ARG (exp, 1);
26090 tree arg2 = CALL_EXPR_ARG (exp, 2);
26091 tree arg3 = CALL_EXPR_ARG (exp, 3);
26092 tree arg4 = CALL_EXPR_ARG (exp, 4);
26093 rtx scratch0, scratch1;
26094 rtx op0 = expand_normal (arg0);
26095 rtx op1 = expand_normal (arg1);
26096 rtx op2 = expand_normal (arg2);
26097 rtx op3 = expand_normal (arg3);
26098 rtx op4 = expand_normal (arg4);
26099 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
26101 tmode0 = insn_data[d->icode].operand[0].mode;
26102 tmode1 = insn_data[d->icode].operand[1].mode;
26103 modev2 = insn_data[d->icode].operand[2].mode;
26104 modei3 = insn_data[d->icode].operand[3].mode;
26105 modev4 = insn_data[d->icode].operand[4].mode;
26106 modei5 = insn_data[d->icode].operand[5].mode;
26107 modeimm = insn_data[d->icode].operand[6].mode;
26109 if (VECTOR_MODE_P (modev2))
26110 op0 = safe_vector_operand (op0, modev2);
26111 if (VECTOR_MODE_P (modev4))
26112 op2 = safe_vector_operand (op2, modev4);
26114 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
26115 op0 = copy_to_mode_reg (modev2, op0);
26116 if (!insn_data[d->icode].operand[3].predicate (op1, modei3))
26117 op1 = copy_to_mode_reg (modei3, op1);
26118 if ((optimize && !register_operand (op2, modev4))
26119 || !insn_data[d->icode].operand[4].predicate (op2, modev4))
26120 op2 = copy_to_mode_reg (modev4, op2);
26121 if (!insn_data[d->icode].operand[5].predicate (op3, modei5))
26122 op3 = copy_to_mode_reg (modei5, op3);
26124 if (!insn_data[d->icode].operand[6].predicate (op4, modeimm))
26126 error ("the fifth argument must be an 8-bit immediate");
26130 if (d->code == IX86_BUILTIN_PCMPESTRI128)
26132 if (optimize || !target
26133 || GET_MODE (target) != tmode0
26134 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
26135 target = gen_reg_rtx (tmode0);
26137 scratch1 = gen_reg_rtx (tmode1);
26139 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
26141 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
26143 if (optimize || !target
26144 || GET_MODE (target) != tmode1
26145 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
26146 target = gen_reg_rtx (tmode1);
26148 scratch0 = gen_reg_rtx (tmode0);
26150 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
26154 gcc_assert (d->flag);
26156 scratch0 = gen_reg_rtx (tmode0);
26157 scratch1 = gen_reg_rtx (tmode1);
26159 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
26169 target = gen_reg_rtx (SImode);
26170 emit_move_insn (target, const0_rtx);
26171 target = gen_rtx_SUBREG (QImode, target, 0);
26174 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26175 gen_rtx_fmt_ee (EQ, QImode,
26176 gen_rtx_REG ((enum machine_mode) d->flag,
26179 return SUBREG_REG (target);
26186 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
26189 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
26190 tree exp, rtx target)
26193 tree arg0 = CALL_EXPR_ARG (exp, 0);
26194 tree arg1 = CALL_EXPR_ARG (exp, 1);
26195 tree arg2 = CALL_EXPR_ARG (exp, 2);
26196 rtx scratch0, scratch1;
26197 rtx op0 = expand_normal (arg0);
26198 rtx op1 = expand_normal (arg1);
26199 rtx op2 = expand_normal (arg2);
26200 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
26202 tmode0 = insn_data[d->icode].operand[0].mode;
26203 tmode1 = insn_data[d->icode].operand[1].mode;
26204 modev2 = insn_data[d->icode].operand[2].mode;
26205 modev3 = insn_data[d->icode].operand[3].mode;
26206 modeimm = insn_data[d->icode].operand[4].mode;
26208 if (VECTOR_MODE_P (modev2))
26209 op0 = safe_vector_operand (op0, modev2);
26210 if (VECTOR_MODE_P (modev3))
26211 op1 = safe_vector_operand (op1, modev3);
26213 if (!insn_data[d->icode].operand[2].predicate (op0, modev2))
26214 op0 = copy_to_mode_reg (modev2, op0);
26215 if ((optimize && !register_operand (op1, modev3))
26216 || !insn_data[d->icode].operand[3].predicate (op1, modev3))
26217 op1 = copy_to_mode_reg (modev3, op1);
26219 if (!insn_data[d->icode].operand[4].predicate (op2, modeimm))
26221 error ("the third argument must be an 8-bit immediate");
26225 if (d->code == IX86_BUILTIN_PCMPISTRI128)
26227 if (optimize || !target
26228 || GET_MODE (target) != tmode0
26229 || !insn_data[d->icode].operand[0].predicate (target, tmode0))
26230 target = gen_reg_rtx (tmode0);
26232 scratch1 = gen_reg_rtx (tmode1);
26234 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
26236 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
26238 if (optimize || !target
26239 || GET_MODE (target) != tmode1
26240 || !insn_data[d->icode].operand[1].predicate (target, tmode1))
26241 target = gen_reg_rtx (tmode1);
26243 scratch0 = gen_reg_rtx (tmode0);
26245 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
26249 gcc_assert (d->flag);
26251 scratch0 = gen_reg_rtx (tmode0);
26252 scratch1 = gen_reg_rtx (tmode1);
26254 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
26264 target = gen_reg_rtx (SImode);
26265 emit_move_insn (target, const0_rtx);
26266 target = gen_rtx_SUBREG (QImode, target, 0);
26269 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
26270 gen_rtx_fmt_ee (EQ, QImode,
26271 gen_rtx_REG ((enum machine_mode) d->flag,
26274 return SUBREG_REG (target);
26280 /* Subroutine of ix86_expand_builtin to take care of insns with
26281 a variable number of operands.  */
26284 ix86_expand_args_builtin (const struct builtin_description *d,
26285 tree exp, rtx target)
26287 rtx pat, real_target;
26288 unsigned int i, nargs;
26289 unsigned int nargs_constant = 0;
26290 int num_memory = 0;
26294 enum machine_mode mode;
26296 bool last_arg_count = false;
26297 enum insn_code icode = d->icode;
26298 const struct insn_data_d *insn_p = &insn_data[icode];
26299 enum machine_mode tmode = insn_p->operand[0].mode;
26300 enum machine_mode rmode = VOIDmode;
26302 enum rtx_code comparison = d->comparison;
26304 switch ((enum ix86_builtin_func_type) d->flag)
26306 case V2DF_FTYPE_V2DF_ROUND:
26307 case V4DF_FTYPE_V4DF_ROUND:
26308 case V4SF_FTYPE_V4SF_ROUND:
26309 case V8SF_FTYPE_V8SF_ROUND:
26310 return ix86_expand_sse_round (d, exp, target);
26311 case INT_FTYPE_V8SF_V8SF_PTEST:
26312 case INT_FTYPE_V4DI_V4DI_PTEST:
26313 case INT_FTYPE_V4DF_V4DF_PTEST:
26314 case INT_FTYPE_V4SF_V4SF_PTEST:
26315 case INT_FTYPE_V2DI_V2DI_PTEST:
26316 case INT_FTYPE_V2DF_V2DF_PTEST:
26317 return ix86_expand_sse_ptest (d, exp, target);
26318 case FLOAT128_FTYPE_FLOAT128:
26319 case FLOAT_FTYPE_FLOAT:
26320 case INT_FTYPE_INT:
26321 case UINT64_FTYPE_INT:
26322 case UINT16_FTYPE_UINT16:
26323 case INT64_FTYPE_INT64:
26324 case INT64_FTYPE_V4SF:
26325 case INT64_FTYPE_V2DF:
26326 case INT_FTYPE_V16QI:
26327 case INT_FTYPE_V8QI:
26328 case INT_FTYPE_V8SF:
26329 case INT_FTYPE_V4DF:
26330 case INT_FTYPE_V4SF:
26331 case INT_FTYPE_V2DF:
26332 case V16QI_FTYPE_V16QI:
26333 case V8SI_FTYPE_V8SF:
26334 case V8SI_FTYPE_V4SI:
26335 case V8HI_FTYPE_V8HI:
26336 case V8HI_FTYPE_V16QI:
26337 case V8QI_FTYPE_V8QI:
26338 case V8SF_FTYPE_V8SF:
26339 case V8SF_FTYPE_V8SI:
26340 case V8SF_FTYPE_V4SF:
26341 case V8SF_FTYPE_V8HI:
26342 case V4SI_FTYPE_V4SI:
26343 case V4SI_FTYPE_V16QI:
26344 case V4SI_FTYPE_V4SF:
26345 case V4SI_FTYPE_V8SI:
26346 case V4SI_FTYPE_V8HI:
26347 case V4SI_FTYPE_V4DF:
26348 case V4SI_FTYPE_V2DF:
26349 case V4HI_FTYPE_V4HI:
26350 case V4DF_FTYPE_V4DF:
26351 case V4DF_FTYPE_V4SI:
26352 case V4DF_FTYPE_V4SF:
26353 case V4DF_FTYPE_V2DF:
26354 case V4SF_FTYPE_V4SF:
26355 case V4SF_FTYPE_V4SI:
26356 case V4SF_FTYPE_V8SF:
26357 case V4SF_FTYPE_V4DF:
26358 case V4SF_FTYPE_V8HI:
26359 case V4SF_FTYPE_V2DF:
26360 case V2DI_FTYPE_V2DI:
26361 case V2DI_FTYPE_V16QI:
26362 case V2DI_FTYPE_V8HI:
26363 case V2DI_FTYPE_V4SI:
26364 case V2DF_FTYPE_V2DF:
26365 case V2DF_FTYPE_V4SI:
26366 case V2DF_FTYPE_V4DF:
26367 case V2DF_FTYPE_V4SF:
26368 case V2DF_FTYPE_V2SI:
26369 case V2SI_FTYPE_V2SI:
26370 case V2SI_FTYPE_V4SF:
26371 case V2SI_FTYPE_V2SF:
26372 case V2SI_FTYPE_V2DF:
26373 case V2SF_FTYPE_V2SF:
26374 case V2SF_FTYPE_V2SI:
26377 case V4SF_FTYPE_V4SF_VEC_MERGE:
26378 case V2DF_FTYPE_V2DF_VEC_MERGE:
26379 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
26380 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
26381 case V16QI_FTYPE_V16QI_V16QI:
26382 case V16QI_FTYPE_V8HI_V8HI:
26383 case V8QI_FTYPE_V8QI_V8QI:
26384 case V8QI_FTYPE_V4HI_V4HI:
26385 case V8HI_FTYPE_V8HI_V8HI:
26386 case V8HI_FTYPE_V16QI_V16QI:
26387 case V8HI_FTYPE_V4SI_V4SI:
26388 case V8SF_FTYPE_V8SF_V8SF:
26389 case V8SF_FTYPE_V8SF_V8SI:
26390 case V4SI_FTYPE_V4SI_V4SI:
26391 case V4SI_FTYPE_V8HI_V8HI:
26392 case V4SI_FTYPE_V4SF_V4SF:
26393 case V4SI_FTYPE_V2DF_V2DF:
26394 case V4HI_FTYPE_V4HI_V4HI:
26395 case V4HI_FTYPE_V8QI_V8QI:
26396 case V4HI_FTYPE_V2SI_V2SI:
26397 case V4DF_FTYPE_V4DF_V4DF:
26398 case V4DF_FTYPE_V4DF_V4DI:
26399 case V4SF_FTYPE_V4SF_V4SF:
26400 case V4SF_FTYPE_V4SF_V4SI:
26401 case V4SF_FTYPE_V4SF_V2SI:
26402 case V4SF_FTYPE_V4SF_V2DF:
26403 case V4SF_FTYPE_V4SF_DI:
26404 case V4SF_FTYPE_V4SF_SI:
26405 case V2DI_FTYPE_V2DI_V2DI:
26406 case V2DI_FTYPE_V16QI_V16QI:
26407 case V2DI_FTYPE_V4SI_V4SI:
26408 case V2DI_FTYPE_V2DI_V16QI:
26409 case V2DI_FTYPE_V2DF_V2DF:
26410 case V2SI_FTYPE_V2SI_V2SI:
26411 case V2SI_FTYPE_V4HI_V4HI:
26412 case V2SI_FTYPE_V2SF_V2SF:
26413 case V2DF_FTYPE_V2DF_V2DF:
26414 case V2DF_FTYPE_V2DF_V4SF:
26415 case V2DF_FTYPE_V2DF_V2DI:
26416 case V2DF_FTYPE_V2DF_DI:
26417 case V2DF_FTYPE_V2DF_SI:
26418 case V2SF_FTYPE_V2SF_V2SF:
26419 case V1DI_FTYPE_V1DI_V1DI:
26420 case V1DI_FTYPE_V8QI_V8QI:
26421 case V1DI_FTYPE_V2SI_V2SI:
26422 if (comparison == UNKNOWN)
26423 return ix86_expand_binop_builtin (icode, exp, target);
26426 case V4SF_FTYPE_V4SF_V4SF_SWAP:
26427 case V2DF_FTYPE_V2DF_V2DF_SWAP:
26428 gcc_assert (comparison != UNKNOWN);
26432 case V8HI_FTYPE_V8HI_V8HI_COUNT:
26433 case V8HI_FTYPE_V8HI_SI_COUNT:
26434 case V4SI_FTYPE_V4SI_V4SI_COUNT:
26435 case V4SI_FTYPE_V4SI_SI_COUNT:
26436 case V4HI_FTYPE_V4HI_V4HI_COUNT:
26437 case V4HI_FTYPE_V4HI_SI_COUNT:
26438 case V2DI_FTYPE_V2DI_V2DI_COUNT:
26439 case V2DI_FTYPE_V2DI_SI_COUNT:
26440 case V2SI_FTYPE_V2SI_V2SI_COUNT:
26441 case V2SI_FTYPE_V2SI_SI_COUNT:
26442 case V1DI_FTYPE_V1DI_V1DI_COUNT:
26443 case V1DI_FTYPE_V1DI_SI_COUNT:
26445 last_arg_count = true;
26447 case UINT64_FTYPE_UINT64_UINT64:
26448 case UINT_FTYPE_UINT_UINT:
26449 case UINT_FTYPE_UINT_USHORT:
26450 case UINT_FTYPE_UINT_UCHAR:
26451 case UINT16_FTYPE_UINT16_INT:
26452 case UINT8_FTYPE_UINT8_INT:
26455 case V2DI_FTYPE_V2DI_INT_CONVERT:
26458 nargs_constant = 1;
26460 case V8HI_FTYPE_V8HI_INT:
26461 case V8HI_FTYPE_V8SF_INT:
26462 case V8HI_FTYPE_V4SF_INT:
26463 case V8SF_FTYPE_V8SF_INT:
26464 case V4SI_FTYPE_V4SI_INT:
26465 case V4SI_FTYPE_V8SI_INT:
26466 case V4HI_FTYPE_V4HI_INT:
26467 case V4DF_FTYPE_V4DF_INT:
26468 case V4SF_FTYPE_V4SF_INT:
26469 case V4SF_FTYPE_V8SF_INT:
26470 case V2DI_FTYPE_V2DI_INT:
26471 case V2DF_FTYPE_V2DF_INT:
26472 case V2DF_FTYPE_V4DF_INT:
26474 nargs_constant = 1;
26476 case V16QI_FTYPE_V16QI_V16QI_V16QI:
26477 case V8SF_FTYPE_V8SF_V8SF_V8SF:
26478 case V4DF_FTYPE_V4DF_V4DF_V4DF:
26479 case V4SF_FTYPE_V4SF_V4SF_V4SF:
26480 case V2DF_FTYPE_V2DF_V2DF_V2DF:
26483 case V16QI_FTYPE_V16QI_V16QI_INT:
26484 case V8HI_FTYPE_V8HI_V8HI_INT:
26485 case V8SI_FTYPE_V8SI_V8SI_INT:
26486 case V8SI_FTYPE_V8SI_V4SI_INT:
26487 case V8SF_FTYPE_V8SF_V8SF_INT:
26488 case V8SF_FTYPE_V8SF_V4SF_INT:
26489 case V4SI_FTYPE_V4SI_V4SI_INT:
26490 case V4DF_FTYPE_V4DF_V4DF_INT:
26491 case V4DF_FTYPE_V4DF_V2DF_INT:
26492 case V4SF_FTYPE_V4SF_V4SF_INT:
26493 case V2DI_FTYPE_V2DI_V2DI_INT:
26494 case V2DF_FTYPE_V2DF_V2DF_INT:
26496 nargs_constant = 1;
26498 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
26501 nargs_constant = 1;
26503 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
26506 nargs_constant = 1;
26508 case V2DI_FTYPE_V2DI_UINT_UINT:
26510 nargs_constant = 2;
26512 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
26513 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
26514 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
26515 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
26517 nargs_constant = 1;
26519 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
26521 nargs_constant = 2;
26524 gcc_unreachable ();
26527 gcc_assert (nargs <= ARRAY_SIZE (args));
26529 if (comparison != UNKNOWN)
26531 gcc_assert (nargs == 2);
26532 return ix86_expand_sse_compare (d, exp, target, swap);
26535 if (rmode == VOIDmode || rmode == tmode)
26539 || GET_MODE (target) != tmode
26540 || !insn_p->operand[0].predicate (target, tmode))
26541 target = gen_reg_rtx (tmode);
26542 real_target = target;
26546 target = gen_reg_rtx (rmode);
26547 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
26550 for (i = 0; i < nargs; i++)
26552 tree arg = CALL_EXPR_ARG (exp, i);
26553 rtx op = expand_normal (arg);
26554 enum machine_mode mode = insn_p->operand[i + 1].mode;
26555 bool match = insn_p->operand[i + 1].predicate (op, mode);
26557 if (last_arg_count && (i + 1) == nargs)
26559 /* SIMD shift insns take either an 8-bit immediate or a
26560 register as the count.  But the builtin functions take an int as
26561 the count.  If the count doesn't match, we put it in a register.  */
26564 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
26565 if (!insn_p->operand[i + 1].predicate (op, mode))
26566 op = copy_to_reg (op);
26569 else if ((nargs - i) <= nargs_constant)
26574 case CODE_FOR_sse4_1_roundpd:
26575 case CODE_FOR_sse4_1_roundps:
26576 case CODE_FOR_sse4_1_roundsd:
26577 case CODE_FOR_sse4_1_roundss:
26578 case CODE_FOR_sse4_1_blendps:
26579 case CODE_FOR_avx_blendpd256:
26580 case CODE_FOR_avx_vpermilv4df:
26581 case CODE_FOR_avx_roundpd256:
26582 case CODE_FOR_avx_roundps256:
26583 error ("the last argument must be a 4-bit immediate");
26586 case CODE_FOR_sse4_1_blendpd:
26587 case CODE_FOR_avx_vpermilv2df:
26588 case CODE_FOR_xop_vpermil2v2df3:
26589 case CODE_FOR_xop_vpermil2v4sf3:
26590 case CODE_FOR_xop_vpermil2v4df3:
26591 case CODE_FOR_xop_vpermil2v8sf3:
26592 error ("the last argument must be a 2-bit immediate");
26595 case CODE_FOR_avx_vextractf128v4df:
26596 case CODE_FOR_avx_vextractf128v8sf:
26597 case CODE_FOR_avx_vextractf128v8si:
26598 case CODE_FOR_avx_vinsertf128v4df:
26599 case CODE_FOR_avx_vinsertf128v8sf:
26600 case CODE_FOR_avx_vinsertf128v8si:
26601 error ("the last argument must be a 1-bit immediate");
26604 case CODE_FOR_avx_vmcmpv2df3:
26605 case CODE_FOR_avx_vmcmpv4sf3:
26606 case CODE_FOR_avx_cmpv2df3:
26607 case CODE_FOR_avx_cmpv4sf3:
26608 case CODE_FOR_avx_cmpv4df3:
26609 case CODE_FOR_avx_cmpv8sf3:
26610 error ("the last argument must be a 5-bit immediate");
26614 switch (nargs_constant)
26617 if ((nargs - i) == nargs_constant)
26619 error ("the next to last argument must be an 8-bit immediate");
26623 error ("the last argument must be an 8-bit immediate");
26626 gcc_unreachable ();
26633 if (VECTOR_MODE_P (mode))
26634 op = safe_vector_operand (op, mode);
26636 /* If we aren't optimizing, only allow one memory operand to be generated.  */
26638 if (memory_operand (op, mode))
26641 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
26643 if (optimize || !match || num_memory > 1)
26644 op = copy_to_mode_reg (mode, op);
26648 op = copy_to_reg (op);
26649 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
26654 args[i].mode = mode;
26660 pat = GEN_FCN (icode) (real_target, args[0].op);
26663 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
26666 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
26670 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
26671 args[2].op, args[3].op);
26674 gcc_unreachable ();
26684 /* Subroutine of ix86_expand_builtin to take care of special insns
26685 with a variable number of operands.  */
26688 ix86_expand_special_args_builtin (const struct builtin_description *d,
26689 tree exp, rtx target)
26693 unsigned int i, nargs, arg_adjust, memory;
26697 enum machine_mode mode;
26699 enum insn_code icode = d->icode;
26700 bool last_arg_constant = false;
26701 const struct insn_data_d *insn_p = &insn_data[icode];
26702 enum machine_mode tmode = insn_p->operand[0].mode;
26703 enum { load, store } klass;
26705 switch ((enum ix86_builtin_func_type) d->flag)
26707 case VOID_FTYPE_VOID:
26708 if (icode == CODE_FOR_avx_vzeroupper)
26709 target = GEN_INT (vzeroupper_intrinsic);
26710 emit_insn (GEN_FCN (icode) (target));
26712 case VOID_FTYPE_UINT64:
26713 case VOID_FTYPE_UNSIGNED:
26719 case UINT64_FTYPE_VOID:
26720 case UNSIGNED_FTYPE_VOID:
26725 case UINT64_FTYPE_PUNSIGNED:
26726 case V2DI_FTYPE_PV2DI:
26727 case V32QI_FTYPE_PCCHAR:
26728 case V16QI_FTYPE_PCCHAR:
26729 case V8SF_FTYPE_PCV4SF:
26730 case V8SF_FTYPE_PCFLOAT:
26731 case V4SF_FTYPE_PCFLOAT:
26732 case V4DF_FTYPE_PCV2DF:
26733 case V4DF_FTYPE_PCDOUBLE:
26734 case V2DF_FTYPE_PCDOUBLE:
26735 case VOID_FTYPE_PVOID:
26740 case VOID_FTYPE_PV2SF_V4SF:
26741 case VOID_FTYPE_PV4DI_V4DI:
26742 case VOID_FTYPE_PV2DI_V2DI:
26743 case VOID_FTYPE_PCHAR_V32QI:
26744 case VOID_FTYPE_PCHAR_V16QI:
26745 case VOID_FTYPE_PFLOAT_V8SF:
26746 case VOID_FTYPE_PFLOAT_V4SF:
26747 case VOID_FTYPE_PDOUBLE_V4DF:
26748 case VOID_FTYPE_PDOUBLE_V2DF:
26749 case VOID_FTYPE_PULONGLONG_ULONGLONG:
26750 case VOID_FTYPE_PINT_INT:
26753 /* Reserve memory operand for target. */
26754 memory = ARRAY_SIZE (args);
26756 case V4SF_FTYPE_V4SF_PCV2SF:
26757 case V2DF_FTYPE_V2DF_PCDOUBLE:
26762 case V8SF_FTYPE_PCV8SF_V8SI:
26763 case V4DF_FTYPE_PCV4DF_V4DI:
26764 case V4SF_FTYPE_PCV4SF_V4SI:
26765 case V2DF_FTYPE_PCV2DF_V2DI:
26770 case VOID_FTYPE_PV8SF_V8SI_V8SF:
26771 case VOID_FTYPE_PV4DF_V4DI_V4DF:
26772 case VOID_FTYPE_PV4SF_V4SI_V4SF:
26773 case VOID_FTYPE_PV2DF_V2DI_V2DF:
26776 /* Reserve memory operand for target. */
26777 memory = ARRAY_SIZE (args);
26779 case VOID_FTYPE_UINT_UINT_UINT:
26780 case VOID_FTYPE_UINT64_UINT_UINT:
26781 case UCHAR_FTYPE_UINT_UINT_UINT:
26782 case UCHAR_FTYPE_UINT64_UINT_UINT:
26785 memory = ARRAY_SIZE (args);
26786 last_arg_constant = true;
26789 gcc_unreachable ();
26792 gcc_assert (nargs <= ARRAY_SIZE (args));
26794 if (klass == store)
26796 arg = CALL_EXPR_ARG (exp, 0);
26797 op = expand_normal (arg);
26798 gcc_assert (target == 0);
26801 if (GET_MODE (op) != Pmode)
26802 op = convert_to_mode (Pmode, op, 1);
26803 target = gen_rtx_MEM (tmode, force_reg (Pmode, op));
26806 target = force_reg (tmode, op);
26814 || GET_MODE (target) != tmode
26815 || !insn_p->operand[0].predicate (target, tmode))
26816 target = gen_reg_rtx (tmode);
26819 for (i = 0; i < nargs; i++)
26821 enum machine_mode mode = insn_p->operand[i + 1].mode;
26824 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
26825 op = expand_normal (arg);
26826 match = insn_p->operand[i + 1].predicate (op, mode);
26828 if (last_arg_constant && (i + 1) == nargs)
26832 if (icode == CODE_FOR_lwp_lwpvalsi3
26833 || icode == CODE_FOR_lwp_lwpinssi3
26834 || icode == CODE_FOR_lwp_lwpvaldi3
26835 || icode == CODE_FOR_lwp_lwpinsdi3)
26836 error ("the last argument must be a 32-bit immediate");
26838 error ("the last argument must be an 8-bit immediate");
26846 /* This must be the memory operand. */
26847 if (GET_MODE (op) != Pmode)
26848 op = convert_to_mode (Pmode, op, 1);
26849 op = gen_rtx_MEM (mode, force_reg (Pmode, op));
26850 gcc_assert (GET_MODE (op) == mode
26851 || GET_MODE (op) == VOIDmode);
26855 /* This must be a register.  */
26856 if (VECTOR_MODE_P (mode))
26857 op = safe_vector_operand (op, mode);
26859 gcc_assert (GET_MODE (op) == mode
26860 || GET_MODE (op) == VOIDmode);
26861 op = copy_to_mode_reg (mode, op);
26866 args[i].mode = mode;
26872 pat = GEN_FCN (icode) (target);
26875 pat = GEN_FCN (icode) (target, args[0].op);
26878 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
26881 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
26884 gcc_unreachable ();
26890 return klass == store ? 0 : target;
26893 /* Return the integer constant in ARG. Constrain it to be in the range
26894 of the subparts of VEC_TYPE; issue an error if not. */
26897 get_element_number (tree vec_type, tree arg)
26899 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
26901 if (!host_integerp (arg, 1)
26902 || (elt = tree_low_cst (arg, 1), elt > max))
26904 error ("selector must be an integer constant in the range 0..%wi", max);
26911 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
26912 ix86_expand_vector_init. We DO have language-level syntax for this, in
26913 the form of (type){ init-list }. Except that since we can't place emms
26914 instructions from inside the compiler, we can't allow the use of MMX
26915 registers unless the user explicitly asks for it. So we do *not* define
26916 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
26917 we have builtins invoked by mmintrin.h that give us license to emit
26918 these sorts of instructions. */
26921 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
26923 enum machine_mode tmode = TYPE_MODE (type);
26924 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
26925 int i, n_elt = GET_MODE_NUNITS (tmode);
26926 rtvec v = rtvec_alloc (n_elt);
26928 gcc_assert (VECTOR_MODE_P (tmode));
26929 gcc_assert (call_expr_nargs (exp) == n_elt);
26931 for (i = 0; i < n_elt; ++i)
26933 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
26934 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
26937 if (!target || !register_operand (target, tmode))
26938 target = gen_reg_rtx (tmode);
26940 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
26944 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
26945 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
26946 had a language-level syntax for referencing vector elements. */
26949 ix86_expand_vec_ext_builtin (tree exp, rtx target)
26951 enum machine_mode tmode, mode0;
26956 arg0 = CALL_EXPR_ARG (exp, 0);
26957 arg1 = CALL_EXPR_ARG (exp, 1);
26959 op0 = expand_normal (arg0);
26960 elt = get_element_number (TREE_TYPE (arg0), arg1);
26962 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
26963 mode0 = TYPE_MODE (TREE_TYPE (arg0));
26964 gcc_assert (VECTOR_MODE_P (mode0));
26966 op0 = force_reg (mode0, op0);
26968 if (optimize || !target || !register_operand (target, tmode))
26969 target = gen_reg_rtx (tmode);
26971 ix86_expand_vector_extract (true, target, op0, elt);
26976 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
26977 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
26978 a language-level syntax for referencing vector elements. */
26981 ix86_expand_vec_set_builtin (tree exp)
26983 enum machine_mode tmode, mode1;
26984 tree arg0, arg1, arg2;
26986 rtx op0, op1, target;
26988 arg0 = CALL_EXPR_ARG (exp, 0);
26989 arg1 = CALL_EXPR_ARG (exp, 1);
26990 arg2 = CALL_EXPR_ARG (exp, 2);
26992 tmode = TYPE_MODE (TREE_TYPE (arg0));
26993 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
26994 gcc_assert (VECTOR_MODE_P (tmode));
26996 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
26997 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
26998 elt = get_element_number (TREE_TYPE (arg0), arg2);
27000 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
27001 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
27003 op0 = force_reg (tmode, op0);
27004 op1 = force_reg (mode1, op1);
27006 /* OP0 is the source of these builtin functions and shouldn't be
27007 modified. Create a copy, use it and return it as target. */
27008 target = gen_reg_rtx (tmode);
27009 emit_move_insn (target, op0);
27010 ix86_expand_vector_set (true, target, op1, elt);
27015 /* Expand an expression EXP that calls a built-in function,
27016 with result going to TARGET if that's convenient
27017 (and in mode MODE if that's convenient).
27018 SUBTARGET may be used as the target for computing one of EXP's operands.
27019 IGNORE is nonzero if the value is to be ignored. */
27022 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
27023 enum machine_mode mode ATTRIBUTE_UNUSED,
27024 int ignore ATTRIBUTE_UNUSED)
27026 const struct builtin_description *d;
27028 enum insn_code icode;
27029 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
27030 tree arg0, arg1, arg2;
27031 rtx op0, op1, op2, pat;
27032 enum machine_mode mode0, mode1, mode2;
27033 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
27035 /* Determine whether the builtin function is available under the current ISA.
27036 Originally the builtin was not created if it wasn't applicable to the
27037 current ISA based on the command line switches. With function specific
27038 options, we need to check in the context of the function making the call
27039 whether it is supported. */
27040 if (ix86_builtins_isa[fcode].isa
27041 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
27043 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
27044 NULL, (enum fpmath_unit) 0, false);
27047 error ("%qE needs unknown isa option", fndecl);
27050 gcc_assert (opts != NULL);
27051 error ("%qE needs isa option %s", fndecl, opts);
27059 case IX86_BUILTIN_MASKMOVQ:
27060 case IX86_BUILTIN_MASKMOVDQU:
27061 icode = (fcode == IX86_BUILTIN_MASKMOVQ
27062 ? CODE_FOR_mmx_maskmovq
27063 : CODE_FOR_sse2_maskmovdqu);
27064 /* Note the arg order is different from the operand order. */
27065 arg1 = CALL_EXPR_ARG (exp, 0);
27066 arg2 = CALL_EXPR_ARG (exp, 1);
27067 arg0 = CALL_EXPR_ARG (exp, 2);
27068 op0 = expand_normal (arg0);
27069 op1 = expand_normal (arg1);
27070 op2 = expand_normal (arg2);
27071 mode0 = insn_data[icode].operand[0].mode;
27072 mode1 = insn_data[icode].operand[1].mode;
27073 mode2 = insn_data[icode].operand[2].mode;
27075 if (GET_MODE (op0) != Pmode)
27076 op0 = convert_to_mode (Pmode, op0, 1);
27077 op0 = gen_rtx_MEM (mode1, force_reg (Pmode, op0));
27079 if (!insn_data[icode].operand[0].predicate (op0, mode0))
27080 op0 = copy_to_mode_reg (mode0, op0);
27081 if (!insn_data[icode].operand[1].predicate (op1, mode1))
27082 op1 = copy_to_mode_reg (mode1, op1);
27083 if (!insn_data[icode].operand[2].predicate (op2, mode2))
27084 op2 = copy_to_mode_reg (mode2, op2);
27085 pat = GEN_FCN (icode) (op0, op1, op2);
27091 case IX86_BUILTIN_LDMXCSR:
27092 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
27093 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27094 emit_move_insn (target, op0);
27095 emit_insn (gen_sse_ldmxcsr (target));
27098 case IX86_BUILTIN_STMXCSR:
27099 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
27100 emit_insn (gen_sse_stmxcsr (target));
27101 return copy_to_mode_reg (SImode, target);
27103 case IX86_BUILTIN_CLFLUSH:
27104 arg0 = CALL_EXPR_ARG (exp, 0);
27105 op0 = expand_normal (arg0);
27106 icode = CODE_FOR_sse2_clflush;
27107 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27109 if (GET_MODE (op0) != Pmode)
27110 op0 = convert_to_mode (Pmode, op0, 1);
27111 op0 = force_reg (Pmode, op0);
27114 emit_insn (gen_sse2_clflush (op0));
27117 case IX86_BUILTIN_MONITOR:
27118 arg0 = CALL_EXPR_ARG (exp, 0);
27119 arg1 = CALL_EXPR_ARG (exp, 1);
27120 arg2 = CALL_EXPR_ARG (exp, 2);
27121 op0 = expand_normal (arg0);
27122 op1 = expand_normal (arg1);
27123 op2 = expand_normal (arg2);
27126 if (GET_MODE (op0) != Pmode)
27127 op0 = convert_to_mode (Pmode, op0, 1);
27128 op0 = force_reg (Pmode, op0);
27131 op1 = copy_to_mode_reg (SImode, op1);
27133 op2 = copy_to_mode_reg (SImode, op2);
27134 emit_insn (ix86_gen_monitor (op0, op1, op2));
27137 case IX86_BUILTIN_MWAIT:
27138 arg0 = CALL_EXPR_ARG (exp, 0);
27139 arg1 = CALL_EXPR_ARG (exp, 1);
27140 op0 = expand_normal (arg0);
27141 op1 = expand_normal (arg1);
27143 op0 = copy_to_mode_reg (SImode, op0);
27145 op1 = copy_to_mode_reg (SImode, op1);
27146 emit_insn (gen_sse3_mwait (op0, op1));
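      /* A hedged usage sketch for the MONITOR/MWAIT pair handled above
         (requires SSE3 hardware support; extension and hint values of 0
         are the conservative choice):

             __builtin_ia32_monitor (&flag, 0, 0);
             if (!flag)
               __builtin_ia32_mwait (0, 0);
      */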
27149 case IX86_BUILTIN_VEC_INIT_V2SI:
27150 case IX86_BUILTIN_VEC_INIT_V4HI:
27151 case IX86_BUILTIN_VEC_INIT_V8QI:
27152 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
27154 case IX86_BUILTIN_VEC_EXT_V2DF:
27155 case IX86_BUILTIN_VEC_EXT_V2DI:
27156 case IX86_BUILTIN_VEC_EXT_V4SF:
27157 case IX86_BUILTIN_VEC_EXT_V4SI:
27158 case IX86_BUILTIN_VEC_EXT_V8HI:
27159 case IX86_BUILTIN_VEC_EXT_V2SI:
27160 case IX86_BUILTIN_VEC_EXT_V4HI:
27161 case IX86_BUILTIN_VEC_EXT_V16QI:
27162 return ix86_expand_vec_ext_builtin (exp, target);
27164 case IX86_BUILTIN_VEC_SET_V2DI:
27165 case IX86_BUILTIN_VEC_SET_V4SF:
27166 case IX86_BUILTIN_VEC_SET_V4SI:
27167 case IX86_BUILTIN_VEC_SET_V8HI:
27168 case IX86_BUILTIN_VEC_SET_V4HI:
27169 case IX86_BUILTIN_VEC_SET_V16QI:
27170 return ix86_expand_vec_set_builtin (exp);
27172 case IX86_BUILTIN_VEC_PERM_V2DF:
27173 case IX86_BUILTIN_VEC_PERM_V4SF:
27174 case IX86_BUILTIN_VEC_PERM_V2DI:
27175 case IX86_BUILTIN_VEC_PERM_V4SI:
27176 case IX86_BUILTIN_VEC_PERM_V8HI:
27177 case IX86_BUILTIN_VEC_PERM_V16QI:
27178 case IX86_BUILTIN_VEC_PERM_V2DI_U:
27179 case IX86_BUILTIN_VEC_PERM_V4SI_U:
27180 case IX86_BUILTIN_VEC_PERM_V8HI_U:
27181 case IX86_BUILTIN_VEC_PERM_V16QI_U:
27182 case IX86_BUILTIN_VEC_PERM_V4DF:
27183 case IX86_BUILTIN_VEC_PERM_V8SF:
27184 return ix86_expand_vec_perm_builtin (exp);
27186 case IX86_BUILTIN_INFQ:
27187 case IX86_BUILTIN_HUGE_VALQ:
27189 REAL_VALUE_TYPE inf;
27193 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
27195 tmp = validize_mem (force_const_mem (mode, tmp));
27198 target = gen_reg_rtx (mode);
27200 emit_move_insn (target, tmp);
27204 case IX86_BUILTIN_LLWPCB:
27205 arg0 = CALL_EXPR_ARG (exp, 0);
27206 op0 = expand_normal (arg0);
27207 icode = CODE_FOR_lwp_llwpcb;
27208 if (!insn_data[icode].operand[0].predicate (op0, Pmode))
27210 if (GET_MODE (op0) != Pmode)
27211 op0 = convert_to_mode (Pmode, op0, 1);
27212 op0 = force_reg (Pmode, op0);
27214 emit_insn (gen_lwp_llwpcb (op0));
27217 case IX86_BUILTIN_SLWPCB:
27218 icode = CODE_FOR_lwp_slwpcb;
27220 || !insn_data[icode].operand[0].predicate (target, Pmode))
27221 target = gen_reg_rtx (Pmode);
27222 emit_insn (gen_lwp_slwpcb (target));
27225 case IX86_BUILTIN_BEXTRI32:
27226 case IX86_BUILTIN_BEXTRI64:
27227 arg0 = CALL_EXPR_ARG (exp, 0);
27228 arg1 = CALL_EXPR_ARG (exp, 1);
27229 op0 = expand_normal (arg0);
27230 op1 = expand_normal (arg1);
27231 icode = (fcode == IX86_BUILTIN_BEXTRI32
27232 ? CODE_FOR_tbm_bextri_si
27233 : CODE_FOR_tbm_bextri_di);
27234 if (!CONST_INT_P (op1))
27236 error ("last argument must be an immediate");
27241 unsigned char length = (INTVAL (op1) >> 8) & 0xFF;
27242 unsigned char lsb_index = INTVAL (op1) & 0xFF;
27243 op1 = GEN_INT (length);
27244 op2 = GEN_INT (lsb_index);
27245 pat = GEN_FCN (icode) (target, op0, op1, op2);
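/* Worked example (editorial): for __builtin_ia32_bextri_u32 (x, 0x0508)
   the immediate is decomposed as length = (0x0508 >> 8) & 0xFF = 5 and
   lsb_index = 0x0508 & 0xFF = 8, so the insn extracts the five bits
   x[12:8] into the low bits of the result.  */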
27251 case IX86_BUILTIN_RDRAND16_STEP:
27252 icode = CODE_FOR_rdrandhi_1;
27256 case IX86_BUILTIN_RDRAND32_STEP:
27257 icode = CODE_FOR_rdrandsi_1;
27261 case IX86_BUILTIN_RDRAND64_STEP:
27262 icode = CODE_FOR_rdranddi_1;
27266 op0 = gen_reg_rtx (mode0);
27267 emit_insn (GEN_FCN (icode) (op0));
27269 arg0 = CALL_EXPR_ARG (exp, 0);
27270 op1 = expand_normal (arg0);
27271 if (!address_operand (op1, VOIDmode))
27273 op1 = convert_memory_address (Pmode, op1);
27274 op1 = copy_addr_to_reg (op1);
27276 emit_move_insn (gen_rtx_MEM (mode0, op1), op0);
27278 op1 = gen_reg_rtx (SImode);
27279 emit_move_insn (op1, CONST1_RTX (SImode));
27281 /* Emit SImode conditional move. */
27282 if (mode0 == HImode)
27284 op2 = gen_reg_rtx (SImode);
27285 emit_insn (gen_zero_extendhisi2 (op2, op0));
27287 else if (mode0 == SImode)
27290 op2 = gen_rtx_SUBREG (SImode, op0, 0);
27293 target = gen_reg_rtx (SImode);
27295 pat = gen_rtx_GEU (VOIDmode, gen_rtx_REG (CCCmode, FLAGS_REG),
27297 emit_insn (gen_rtx_SET (VOIDmode, target,
27298 gen_rtx_IF_THEN_ELSE (SImode, pat, op2, op1)));
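/* Editorial sketch of the expansion above: rdrand sets the carry flag on
   success, so the builtin stores the hardware value through the pointer
   argument and then materializes the return value with a conditional
   move on GEU of CCCmode (i.e. the carry flag).  A hedged usage example:

       unsigned int r;
       if (__builtin_ia32_rdrand32_step (&r))
         consume (r);   // 'consume' is a hypothetical user function
   */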
27305 for (i = 0, d = bdesc_special_args;
27306 i < ARRAY_SIZE (bdesc_special_args);
27308 if (d->code == fcode)
27309 return ix86_expand_special_args_builtin (d, exp, target);
27311 for (i = 0, d = bdesc_args;
27312 i < ARRAY_SIZE (bdesc_args);
27314 if (d->code == fcode)
27317 case IX86_BUILTIN_FABSQ:
27318 case IX86_BUILTIN_COPYSIGNQ:
27320 /* Emit a normal call if SSE2 isn't available. */
27321 return expand_call (exp, target, ignore);
27323 return ix86_expand_args_builtin (d, exp, target);
27326 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
27327 if (d->code == fcode)
27328 return ix86_expand_sse_comi (d, exp, target);
27330 for (i = 0, d = bdesc_pcmpestr;
27331 i < ARRAY_SIZE (bdesc_pcmpestr);
27333 if (d->code == fcode)
27334 return ix86_expand_sse_pcmpestr (d, exp, target);
27336 for (i = 0, d = bdesc_pcmpistr;
27337 i < ARRAY_SIZE (bdesc_pcmpistr);
27339 if (d->code == fcode)
27340 return ix86_expand_sse_pcmpistr (d, exp, target);
27342 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
27343 if (d->code == fcode)
27344 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
27345 (enum ix86_builtin_func_type)
27346 d->flag, d->comparison);
27348 gcc_unreachable ();
27351 /* Returns a function decl for a vectorized version of the builtin function
27352 with builtin function code FN and the result vector type TYPE, or NULL_TREE
27353 if it is not available. */
27356 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
27359 enum machine_mode in_mode, out_mode;
27361 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
27363 if (TREE_CODE (type_out) != VECTOR_TYPE
27364 || TREE_CODE (type_in) != VECTOR_TYPE
27365 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
27368 out_mode = TYPE_MODE (TREE_TYPE (type_out));
27369 out_n = TYPE_VECTOR_SUBPARTS (type_out);
27370 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27371 in_n = TYPE_VECTOR_SUBPARTS (type_in);
27375 case BUILT_IN_SQRT:
27376 if (out_mode == DFmode && in_mode == DFmode)
27378 if (out_n == 2 && in_n == 2)
27379 return ix86_builtins[IX86_BUILTIN_SQRTPD];
27380 else if (out_n == 4 && in_n == 4)
27381 return ix86_builtins[IX86_BUILTIN_SQRTPD256];
27385 case BUILT_IN_SQRTF:
27386 if (out_mode == SFmode && in_mode == SFmode)
27388 if (out_n == 4 && in_n == 4)
27389 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
27390 else if (out_n == 8 && in_n == 8)
27391 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR256];
27395 case BUILT_IN_LRINT:
27396 if (out_mode == SImode && out_n == 4
27397 && in_mode == DFmode && in_n == 2)
27398 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
27401 case BUILT_IN_LRINTF:
27402 if (out_mode == SImode && in_mode == SFmode)
27404 if (out_n == 4 && in_n == 4)
27405 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
27406 else if (out_n == 8 && in_n == 8)
27407 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ256];
27411 case BUILT_IN_COPYSIGN:
27412 if (out_mode == DFmode && in_mode == DFmode)
27414 if (out_n == 2 && in_n == 2)
27415 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
27416 else if (out_n == 4 && in_n == 4)
27417 return ix86_builtins[IX86_BUILTIN_CPYSGNPD256];
27421 case BUILT_IN_COPYSIGNF:
27422 if (out_mode == SFmode && in_mode == SFmode)
27424 if (out_n == 4 && in_n == 4)
27425 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
27426 else if (out_n == 8 && in_n == 8)
27427 return ix86_builtins[IX86_BUILTIN_CPYSGNPS256];
27431 case BUILT_IN_FLOOR:
27432 /* The round insn does not trap on denormals. */
27433 if (flag_trapping_math || !TARGET_ROUND)
27436 if (out_mode == DFmode && in_mode == DFmode)
27438 if (out_n == 2 && in_n == 2)
27439 return ix86_builtins[IX86_BUILTIN_FLOORPD];
27440 else if (out_n == 4 && in_n == 4)
27441 return ix86_builtins[IX86_BUILTIN_FLOORPD256];
27445 case BUILT_IN_FLOORF:
27446 /* The round insn does not trap on denormals. */
27447 if (flag_trapping_math || !TARGET_ROUND)
27450 if (out_mode == SFmode && in_mode == SFmode)
27452 if (out_n == 4 && in_n == 4)
27453 return ix86_builtins[IX86_BUILTIN_FLOORPS];
27454 else if (out_n == 8 && in_n == 8)
27455 return ix86_builtins[IX86_BUILTIN_FLOORPS256];
27459 case BUILT_IN_CEIL:
27460 /* The round insn does not trap on denormals. */
27461 if (flag_trapping_math || !TARGET_ROUND)
27464 if (out_mode == DFmode && in_mode == DFmode)
27466 if (out_n == 2 && in_n == 2)
27467 return ix86_builtins[IX86_BUILTIN_CEILPD];
27468 else if (out_n == 4 && in_n == 4)
27469 return ix86_builtins[IX86_BUILTIN_CEILPD256];
27473 case BUILT_IN_CEILF:
27474 /* The round insn does not trap on denormals. */
27475 if (flag_trapping_math || !TARGET_ROUND)
27478 if (out_mode == SFmode && in_mode == SFmode)
27480 if (out_n == 4 && in_n == 4)
27481 return ix86_builtins[IX86_BUILTIN_CEILPS];
27482 else if (out_n == 8 && in_n == 8)
27483 return ix86_builtins[IX86_BUILTIN_CEILPS256];
27487 case BUILT_IN_TRUNC:
27488 /* The round insn does not trap on denormals. */
27489 if (flag_trapping_math || !TARGET_ROUND)
27492 if (out_mode == DFmode && in_mode == DFmode)
27494 if (out_n == 2 && in_n == 2)
27495 return ix86_builtins[IX86_BUILTIN_TRUNCPD];
27496 else if (out_n == 4 && in_n == 4)
27497 return ix86_builtins[IX86_BUILTIN_TRUNCPD256];
27501 case BUILT_IN_TRUNCF:
27502 /* The round insn does not trap on denormals. */
27503 if (flag_trapping_math || !TARGET_ROUND)
27506 if (out_mode == SFmode && in_mode == SFmode)
27508 if (out_n == 4 && in_n == 4)
27509 return ix86_builtins[IX86_BUILTIN_TRUNCPS];
27510 else if (out_n == 8 && in_n == 8)
27511 return ix86_builtins[IX86_BUILTIN_TRUNCPS256];
27515 case BUILT_IN_RINT:
27516 /* The round insn does not trap on denormals. */
27517 if (flag_trapping_math || !TARGET_ROUND)
27520 if (out_mode == DFmode && in_mode == DFmode)
27522 if (out_n == 2 && in_n == 2)
27523 return ix86_builtins[IX86_BUILTIN_RINTPD];
27524 else if (out_n == 4 && in_n == 4)
27525 return ix86_builtins[IX86_BUILTIN_RINTPD256];
27529 case BUILT_IN_RINTF:
27530 /* The round insn does not trap on denormals. */
27531 if (flag_trapping_math || !TARGET_ROUND)
27534 if (out_mode == SFmode && in_mode == SFmode)
27536 if (out_n == 4 && in_n == 4)
27537 return ix86_builtins[IX86_BUILTIN_RINTPS];
27538 else if (out_n == 8 && in_n == 8)
27539 return ix86_builtins[IX86_BUILTIN_RINTPS256];
27544 if (out_mode == DFmode && in_mode == DFmode)
27546 if (out_n == 2 && in_n == 2)
27547 return ix86_builtins[IX86_BUILTIN_VFMADDPD];
27548 if (out_n == 4 && in_n == 4)
27549 return ix86_builtins[IX86_BUILTIN_VFMADDPD256];
27553 case BUILT_IN_FMAF:
27554 if (out_mode == SFmode && in_mode == SFmode)
27556 if (out_n == 4 && in_n == 4)
27557 return ix86_builtins[IX86_BUILTIN_VFMADDPS];
27558 if (out_n == 8 && in_n == 8)
27559 return ix86_builtins[IX86_BUILTIN_VFMADDPS256];
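/* Illustrative example (editorial): with -O3 -mavx -ffast-math, a loop
   like

       for (i = 0; i < n; i++)
         b[i] = sqrt (a[i]);   // double a[n], b[n]

   lets the vectorizer swap BUILT_IN_SQRT (DFmode, four lanes) for
   IX86_BUILTIN_SQRTPD256, i.e. one vsqrtpd per four elements.  */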
27567 /* Dispatch to a handler for a vectorization library. */
27568 if (ix86_veclib_handler)
27569 return ix86_veclib_handler ((enum built_in_function) fn, type_out,
27575 /* Handler for an SVML-style interface to
27576 a library with vectorized intrinsics. */
27579 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
27582 tree fntype, new_fndecl, args;
27585 enum machine_mode el_mode, in_mode;
27588 /* The SVML is suitable for unsafe math only. */
27589 if (!flag_unsafe_math_optimizations)
27592 el_mode = TYPE_MODE (TREE_TYPE (type_out));
27593 n = TYPE_VECTOR_SUBPARTS (type_out);
27594 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27595 in_n = TYPE_VECTOR_SUBPARTS (type_in);
27596 if (el_mode != in_mode
27604 case BUILT_IN_LOG10:
27606 case BUILT_IN_TANH:
27608 case BUILT_IN_ATAN:
27609 case BUILT_IN_ATAN2:
27610 case BUILT_IN_ATANH:
27611 case BUILT_IN_CBRT:
27612 case BUILT_IN_SINH:
27614 case BUILT_IN_ASINH:
27615 case BUILT_IN_ASIN:
27616 case BUILT_IN_COSH:
27618 case BUILT_IN_ACOSH:
27619 case BUILT_IN_ACOS:
27620 if (el_mode != DFmode || n != 2)
27624 case BUILT_IN_EXPF:
27625 case BUILT_IN_LOGF:
27626 case BUILT_IN_LOG10F:
27627 case BUILT_IN_POWF:
27628 case BUILT_IN_TANHF:
27629 case BUILT_IN_TANF:
27630 case BUILT_IN_ATANF:
27631 case BUILT_IN_ATAN2F:
27632 case BUILT_IN_ATANHF:
27633 case BUILT_IN_CBRTF:
27634 case BUILT_IN_SINHF:
27635 case BUILT_IN_SINF:
27636 case BUILT_IN_ASINHF:
27637 case BUILT_IN_ASINF:
27638 case BUILT_IN_COSHF:
27639 case BUILT_IN_COSF:
27640 case BUILT_IN_ACOSHF:
27641 case BUILT_IN_ACOSF:
27642 if (el_mode != SFmode || n != 4)
27650 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
27652 if (fn == BUILT_IN_LOGF)
27653 strcpy (name, "vmlsLn4");
27654 else if (fn == BUILT_IN_LOG)
27655 strcpy (name, "vmldLn2");
27658 sprintf (name, "vmls%s", bname+10);
27659 name[strlen (name)-1] = '4';
27662 sprintf (name, "vmld%s2", bname+10);
27664 /* Convert to uppercase. */
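/* Worked example (editorial): for BUILT_IN_SINF, bname is
   "__builtin_sinf", so bname+10 is "sinf"; "vmls%s" produces "vmlssinf",
   the trailing 'f' is overwritten with '4' giving "vmlssin4", and
   uppercasing name[4] yields the SVML entry point "vmlsSin4".  */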
27668 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
27669 args = TREE_CHAIN (args))
27673 fntype = build_function_type_list (type_out, type_in, NULL);
27675 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
27677 /* Build a function declaration for the vectorized function. */
27678 new_fndecl = build_decl (BUILTINS_LOCATION,
27679 FUNCTION_DECL, get_identifier (name), fntype);
27680 TREE_PUBLIC (new_fndecl) = 1;
27681 DECL_EXTERNAL (new_fndecl) = 1;
27682 DECL_IS_NOVOPS (new_fndecl) = 1;
27683 TREE_READONLY (new_fndecl) = 1;
27688 /* Handler for an ACML-style interface to
27689 a library with vectorized intrinsics. */
27692 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
27694 char name[20] = "__vr.._";
27695 tree fntype, new_fndecl, args;
27698 enum machine_mode el_mode, in_mode;
27701 /* The ACML is 64-bit only and suitable only for unsafe math, as
27702 it does not correctly support parts of IEEE (such as denormals)
27703 with the required precision. */
27705 || !flag_unsafe_math_optimizations)
27708 el_mode = TYPE_MODE (TREE_TYPE (type_out));
27709 n = TYPE_VECTOR_SUBPARTS (type_out);
27710 in_mode = TYPE_MODE (TREE_TYPE (type_in));
27711 in_n = TYPE_VECTOR_SUBPARTS (type_in);
27712 if (el_mode != in_mode
27722 case BUILT_IN_LOG2:
27723 case BUILT_IN_LOG10:
27726 if (el_mode != DFmode
27731 case BUILT_IN_SINF:
27732 case BUILT_IN_COSF:
27733 case BUILT_IN_EXPF:
27734 case BUILT_IN_POWF:
27735 case BUILT_IN_LOGF:
27736 case BUILT_IN_LOG2F:
27737 case BUILT_IN_LOG10F:
27740 if (el_mode != SFmode
27749 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
27750 sprintf (name + 7, "%s", bname+10);
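/* Worked example (editorial, assuming the elided case bodies fill the
   dots of the "__vr.._" template): for BUILT_IN_SINF the template
   becomes "__vrs4_" and bname+10 ("sinf") is appended, yielding the
   ACML entry point "__vrs4_sinf"; the DFmode variant is "__vrd2_sin".  */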
27753 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
27754 args = TREE_CHAIN (args))
27758 fntype = build_function_type_list (type_out, type_in, NULL);
27760 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
27762 /* Build a function declaration for the vectorized function. */
27763 new_fndecl = build_decl (BUILTINS_LOCATION,
27764 FUNCTION_DECL, get_identifier (name), fntype);
27765 TREE_PUBLIC (new_fndecl) = 1;
27766 DECL_EXTERNAL (new_fndecl) = 1;
27767 DECL_IS_NOVOPS (new_fndecl) = 1;
27768 TREE_READONLY (new_fndecl) = 1;
27774 /* Returns a decl of a function that implements conversion of an integer vector
27775 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
27776 are the types involved when converting according to CODE.
27777 Return NULL_TREE if it is not available. */
27780 ix86_vectorize_builtin_conversion (unsigned int code,
27781 tree dest_type, tree src_type)
27789 switch (TYPE_MODE (src_type))
27792 switch (TYPE_MODE (dest_type))
27795 return (TYPE_UNSIGNED (src_type)
27796 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
27797 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
27799 return (TYPE_UNSIGNED (src_type)
27801 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
27807 switch (TYPE_MODE (dest_type))
27810 return (TYPE_UNSIGNED (src_type)
27812 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS256]);
27821 case FIX_TRUNC_EXPR:
27822 switch (TYPE_MODE (dest_type))
27825 switch (TYPE_MODE (src_type))
27828 return (TYPE_UNSIGNED (dest_type)
27830 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
27832 return (TYPE_UNSIGNED (dest_type)
27834 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
27841 switch (TYPE_MODE (src_type))
27844 return (TYPE_UNSIGNED (dest_type)
27846 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
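/* Illustrative example (editorial): a V4SI-to-V4SF FLOAT_EXPR, as in

       int a[4]; float b[4];
       for (i = 0; i < 4; i++) b[i] = a[i];

   is vectorized through IX86_BUILTIN_CVTDQ2PS, i.e. a single cvtdq2ps
   instruction.  */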
27863 /* Returns a decl for a target-specific builtin that implements the
27864 reciprocal of the function, or NULL_TREE if not available. */
27867 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
27868 bool sqrt ATTRIBUTE_UNUSED)
27870 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
27871 && flag_finite_math_only && !flag_trapping_math
27872 && flag_unsafe_math_optimizations))
27876 /* Machine dependent builtins. */
27879 /* Vectorized version of sqrt to rsqrt conversion. */
27880 case IX86_BUILTIN_SQRTPS_NR:
27881 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
27883 case IX86_BUILTIN_SQRTPS_NR256:
27884 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR256];
27890 /* Normal builtins. */
27893 /* Sqrt to rsqrt conversion. */
27894 case BUILT_IN_SQRTF:
27895 return ix86_builtins[IX86_BUILTIN_RSQRTF];
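/* Editorial note: RSQRTF/RSQRTPS only approximate 1/sqrt(x), and the
   expanders refine the estimate with one Newton-Raphson step,

       x1 = x0 * (1.5 - 0.5 * a * x0 * x0),

   which is why the conversion is guarded by the unsafe-math checks
   above.  */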
27902 /* Helper for avx_vpermilps256_operand et al. This is also used by
27903 the expansion functions to turn the parallel back into a mask.
27904 The return value is 0 for no match and the imm8+1 for a match. */
27907 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
27909 unsigned i, nelt = GET_MODE_NUNITS (mode);
27911 unsigned char ipar[8];
27913 if (XVECLEN (par, 0) != (int) nelt)
27916 /* Validate that all of the elements are constants, and not totally
27917 out of range. Copy the data into an integral array to make the
27918 subsequent checks easier. */
27919 for (i = 0; i < nelt; ++i)
27921 rtx er = XVECEXP (par, 0, i);
27922 unsigned HOST_WIDE_INT ei;
27924 if (!CONST_INT_P (er))
27935 /* In the 256-bit DFmode case, we can only move elements within
27936 a 128-bit lane. */
27937 for (i = 0; i < 2; ++i)
27941 mask |= ipar[i] << i;
27943 for (i = 2; i < 4; ++i)
27947 mask |= (ipar[i] - 2) << i;
27952 /* In the 256-bit SFmode case, we have full freedom of movement
27953 within the low 128-bit lane, but the high 128-bit lane must
27954 mirror the exact same pattern. */
27955 for (i = 0; i < 4; ++i)
27956 if (ipar[i] + 4 != ipar[i + 4])
27963 /* In the 128-bit case, we have full freedom in the placement of
27964 the elements from the source operand. */
27965 for (i = 0; i < nelt; ++i)
27966 mask |= ipar[i] << (i * (nelt / 2));
27970 gcc_unreachable ();
27973 /* Make sure success has a non-zero value by adding one. */
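/* Worked example (editorial): for a V4SF parallel [2,3,0,1] the loop
   above accumulates mask = 2<<0 | 3<<2 | 0<<4 | 1<<6 = 0x4e and the
   function returns 0x4f; callers subtract one to recover the
   vpermilps immediate.  */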
27977 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
27978 the expansion functions to turn the parallel back into a mask.
27979 The return value is 0 for no match and the imm8+1 for a match. */
27982 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
27984 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
27986 unsigned char ipar[8];
27988 if (XVECLEN (par, 0) != (int) nelt)
27991 /* Validate that all of the elements are constants, and not totally
27992 out of range. Copy the data into an integral array to make the
27993 subsequent checks easier. */
27994 for (i = 0; i < nelt; ++i)
27996 rtx er = XVECEXP (par, 0, i);
27997 unsigned HOST_WIDE_INT ei;
27999 if (!CONST_INT_P (er))
28002 if (ei >= 2 * nelt)
28007 /* Validate that the elements within each half of the permute are consecutive. */
28008 for (i = 0; i < nelt2 - 1; ++i)
28009 if (ipar[i] + 1 != ipar[i + 1])
28011 for (i = nelt2; i < nelt - 1; ++i)
28012 if (ipar[i] + 1 != ipar[i + 1])
28015 /* Reconstruct the mask. */
28016 for (i = 0; i < 2; ++i)
28018 unsigned e = ipar[i * nelt2];
28022 mask |= e << (i * 4);
28025 /* Make sure success has a non-zero value by adding one. */
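/* Worked example (editorial, assuming the elided scaling of E by
   NELT2): a V4DF parallel [2,3,4,5] has halves [2,3] and [4,5];
   2/2 = 1 and 4/2 = 2 give mask = 1 | 2<<4 = 0x21, the vperm2f128
   immediate selecting the high lane of operand 0 and the low lane of
   operand 1, and the function returns 0x22.  */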
28030 /* Store OPERAND to the memory after reload is completed. This means
28031 that we can't easily use assign_stack_local. */
28033 ix86_force_to_memory (enum machine_mode mode, rtx operand)
28037 gcc_assert (reload_completed);
28038 if (ix86_using_red_zone ())
28040 result = gen_rtx_MEM (mode,
28041 gen_rtx_PLUS (Pmode,
28043 GEN_INT (-RED_ZONE_SIZE)));
28044 emit_move_insn (result, operand);
28046 else if (TARGET_64BIT)
28052 operand = gen_lowpart (DImode, operand);
28056 gen_rtx_SET (VOIDmode,
28057 gen_rtx_MEM (DImode,
28058 gen_rtx_PRE_DEC (DImode,
28059 stack_pointer_rtx)),
28063 gcc_unreachable ();
28065 result = gen_rtx_MEM (mode, stack_pointer_rtx);
28074 split_double_mode (mode, &operand, 1, operands, operands + 1);
28076 gen_rtx_SET (VOIDmode,
28077 gen_rtx_MEM (SImode,
28078 gen_rtx_PRE_DEC (Pmode,
28079 stack_pointer_rtx)),
28082 gen_rtx_SET (VOIDmode,
28083 gen_rtx_MEM (SImode,
28084 gen_rtx_PRE_DEC (Pmode,
28085 stack_pointer_rtx)),
28090 /* Store HImodes as SImodes. */
28091 operand = gen_lowpart (SImode, operand);
28095 gen_rtx_SET (VOIDmode,
28096 gen_rtx_MEM (GET_MODE (operand),
28097 gen_rtx_PRE_DEC (SImode,
28098 stack_pointer_rtx)),
28102 gcc_unreachable ();
28104 result = gen_rtx_MEM (mode, stack_pointer_rtx);
28109 /* Free operand from the memory. */
28111 ix86_free_from_memory (enum machine_mode mode)
28113 if (!ix86_using_red_zone ())
28117 if (mode == DImode || TARGET_64BIT)
28121 /* Use LEA to deallocate stack space. In peephole2 it will be converted
28122 to a pop or add instruction if registers are available. */
28123 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
28124 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
28129 /* Implement TARGET_PREFERRED_RELOAD_CLASS.
28131 Put float CONST_DOUBLE in the constant pool instead of fp regs.
28132 QImode must go into class Q_REGS.
28133 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
28134 movdf to do mem-to-mem moves through integer regs. */
28137 ix86_preferred_reload_class (rtx x, reg_class_t regclass)
28139 enum machine_mode mode = GET_MODE (x);
28141 /* We're only allowed to return a subclass of CLASS. Many of the
28142 following checks fail for NO_REGS, so eliminate that early. */
28143 if (regclass == NO_REGS)
28146 /* All classes can load zeros. */
28147 if (x == CONST0_RTX (mode))
28150 /* Force constants into memory if we are loading a (nonzero) constant into
28151 an MMX or SSE register. This is because there are no MMX/SSE instructions
28152 to load from a constant. */
28154 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
28157 /* Prefer SSE regs only, if we can use them for math. */
28158 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
28159 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
28161 /* Floating-point constants need more complex checks. */
28162 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
28164 /* General regs can load everything. */
28165 if (reg_class_subset_p (regclass, GENERAL_REGS))
28168 /* Floats can load 0 and 1 plus some others. Note that we eliminated
28169 zero above. We only want to wind up preferring 80387 registers if
28170 we plan on doing computation with them. */
28172 && standard_80387_constant_p (x) > 0)
28174 /* Limit class to non-sse. */
28175 if (regclass == FLOAT_SSE_REGS)
28177 if (regclass == FP_TOP_SSE_REGS)
28179 if (regclass == FP_SECOND_SSE_REGS)
28180 return FP_SECOND_REG;
28181 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
28188 /* Generally when we see PLUS here, it's the function invariant
28189 (plus soft-fp const_int). Which can only be computed into general
28190 regs. */
28191 if (GET_CODE (x) == PLUS)
28192 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
28194 /* QImode constants are easy to load, but non-constant QImode data
28195 must go into Q_REGS. */
28196 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
28198 if (reg_class_subset_p (regclass, Q_REGS))
28200 if (reg_class_subset_p (Q_REGS, regclass))
28208 /* Discourage putting floating-point values in SSE registers unless
28209 SSE math is being used, and likewise for the 387 registers. */
28211 ix86_preferred_output_reload_class (rtx x, reg_class_t regclass)
28213 enum machine_mode mode = GET_MODE (x);
28215 /* Restrict the output reload class to the register bank that we are doing
28216 math on. If we would like not to return a subset of CLASS, reject this
28217 alternative: if reload cannot do this, it will still use its choice. */
28218 mode = GET_MODE (x);
28219 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
28220 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
28222 if (X87_FLOAT_MODE_P (mode))
28224 if (regclass == FP_TOP_SSE_REGS)
28226 else if (regclass == FP_SECOND_SSE_REGS)
28227 return FP_SECOND_REG;
28229 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
28236 ix86_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
28237 enum machine_mode mode,
28238 secondary_reload_info *sri ATTRIBUTE_UNUSED)
28240 /* QImode spills from non-QI registers require an
28241 intermediate register on 32-bit targets. */
28243 && !in_p && mode == QImode
28244 && (rclass == GENERAL_REGS
28245 || rclass == LEGACY_REGS
28246 || rclass == INDEX_REGS))
28255 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
28256 regno = true_regnum (x);
28258 /* Return Q_REGS if the operand is in memory. */
28263 /* This condition handles corner case where an expression involving
28264 pointers gets vectorized. We're trying to use the address of a
28265 stack slot as a vector initializer.
28267 (set (reg:V2DI 74 [ vect_cst_.2 ])
28268 (vec_duplicate:V2DI (reg/f:DI 20 frame)))
28270 Eventually frame gets turned into sp+offset like this:
28272 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28273 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
28274 (const_int 392 [0x188]))))
28276 That later gets turned into:
28278 (set (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28279 (vec_duplicate:V2DI (plus:DI (reg/f:DI 7 sp)
28280 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))))
28282 We'll have the following reload recorded:
28284 Reload 0: reload_in (DI) =
28285 (plus:DI (reg/f:DI 7 sp)
28286 (mem/u/c/i:DI (symbol_ref/u:DI ("*.LC0") [flags 0x2]) [0 S8 A64]))
28287 reload_out (V2DI) = (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28288 SSE_REGS, RELOAD_OTHER (opnum = 0), can't combine
28289 reload_in_reg: (plus:DI (reg/f:DI 7 sp) (const_int 392 [0x188]))
28290 reload_out_reg: (reg:V2DI 21 xmm0 [orig:74 vect_cst_.2 ] [74])
28291 reload_reg_rtx: (reg:V2DI 22 xmm1)
28293 Which isn't going to work since SSE instructions can't handle scalar
28294 additions. Returning GENERAL_REGS forces the addition into integer
28295 register and reload can handle subsequent reloads without problems. */
28297 if (in_p && GET_CODE (x) == PLUS
28298 && SSE_CLASS_P (rclass)
28299 && SCALAR_INT_MODE_P (mode))
28300 return GENERAL_REGS;
28305 /* Implement TARGET_CLASS_LIKELY_SPILLED_P. */
28308 ix86_class_likely_spilled_p (reg_class_t rclass)
28319 case SSE_FIRST_REG:
28321 case FP_SECOND_REG:
28331 /* If we are copying between general and FP registers, we need a memory
28332 location. The same is true for SSE and MMX registers.
28334 To optimize register_move_cost performance, allow inline variant.
28336 The macro can't work reliably when one of the CLASSES is a class containing
28337 registers from multiple units (SSE, MMX, integer). We avoid this by never
28338 combining those units in a single alternative in the machine description.
28339 Ensure that this constraint holds to avoid unexpected surprises.
28341 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
28342 enforce these sanity checks. */
28345 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
28346 enum machine_mode mode, int strict)
28348 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
28349 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
28350 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
28351 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
28352 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
28353 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
28355 gcc_assert (!strict);
28359 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
28362 /* ??? This is a lie. We do have moves between mmx/general, and for
28363 mmx/sse2. But by saying we need secondary memory we discourage the
28364 register allocator from using the mmx registers unless needed. */
28365 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
28368 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
28370 /* SSE1 doesn't have any direct moves from other classes. */
28374 /* If the target says that inter-unit moves are more expensive
28375 than moving through memory, then don't generate them. */
28376 if (!TARGET_INTER_UNIT_MOVES)
28379 /* Between SSE and general, we have moves no larger than word size. */
28380 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
28388 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
28389 enum machine_mode mode, int strict)
28391 return inline_secondary_memory_needed (class1, class2, mode, strict);
28394 /* Implement the TARGET_CLASS_MAX_NREGS hook.
28396 On the 80386, this is the size of MODE in words,
28397 except in the FP regs, where a single reg is always enough. */
28399 static unsigned char
28400 ix86_class_max_nregs (reg_class_t rclass, enum machine_mode mode)
28402 if (MAYBE_INTEGER_CLASS_P (rclass))
28404 if (mode == XFmode)
28405 return (TARGET_64BIT ? 2 : 3);
28406 else if (mode == XCmode)
28407 return (TARGET_64BIT ? 4 : 6);
28409 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
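/* Worked example (editorial): on a 32-bit target, XFmode (12 bytes)
   occupies 3 integer registers and XCmode 6, while the generic
   formula gives (8 + 4 - 1) / 4 = 2 registers for DImode.  */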
28413 if (COMPLEX_MODE_P (mode))
28420 /* Return true if the registers in CLASS cannot represent the change from
28421 modes FROM to TO. */
28424 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
28425 enum reg_class regclass)
28430 /* x87 registers can't do subreg at all, as all values are reformatted
28431 to extended precision. */
28432 if (MAYBE_FLOAT_CLASS_P (regclass))
28435 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
28437 /* Vector registers do not support QI or HImode loads. If we don't
28438 disallow a change to these modes, reload will assume it's ok to
28439 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
28440 the vec_dupv4hi pattern. */
28441 if (GET_MODE_SIZE (from) < 4)
28444 /* Vector registers do not support subreg with nonzero offsets, which
28445 are otherwise valid for integer registers. Since we can't see
28446 whether we have a nonzero offset from here, prohibit all
28447 nonparadoxical subregs changing size. */
28448 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
28455 /* Return the cost of moving data of mode M between a
28456 register and memory. A value of 2 is the default; this cost is
28457 relative to those in `REGISTER_MOVE_COST'.
28459 This function is used extensively by register_move_cost that is used to
28460 build tables at startup. Make it inline in this case.
28461 When IN is 2, return maximum of in and out move cost.
28463 If moving between registers and memory is more expensive than
28464 between two registers, you should define this macro to express the
28465 relative cost.
28467 Model also increased moving costs of QImode registers in non
28468 Q_REGS classes. */
28471 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
28475 if (FLOAT_CLASS_P (regclass))
28493 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
28494 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
28496 if (SSE_CLASS_P (regclass))
28499 switch (GET_MODE_SIZE (mode))
28514 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
28515 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
28517 if (MMX_CLASS_P (regclass))
28520 switch (GET_MODE_SIZE (mode))
28532 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
28533 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
28535 switch (GET_MODE_SIZE (mode))
28538 if (Q_CLASS_P (regclass) || TARGET_64BIT)
28541 return ix86_cost->int_store[0];
28542 if (TARGET_PARTIAL_REG_DEPENDENCY
28543 && optimize_function_for_speed_p (cfun))
28544 cost = ix86_cost->movzbl_load;
28546 cost = ix86_cost->int_load[0];
28548 return MAX (cost, ix86_cost->int_store[0]);
28554 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
28556 return ix86_cost->movzbl_load;
28558 return ix86_cost->int_store[0] + 4;
28563 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
28564 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
28566 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
28567 if (mode == TFmode)
28570 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
28572 cost = ix86_cost->int_load[2];
28574 cost = ix86_cost->int_store[2];
28575 return (cost * (((int) GET_MODE_SIZE (mode)
28576 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
28581 ix86_memory_move_cost (enum machine_mode mode, reg_class_t regclass,
28584 return inline_memory_move_cost (mode, (enum reg_class) regclass, in ? 1 : 0);
28588 /* Return the cost of moving data from a register in class CLASS1 to
28589 one in class CLASS2.
28591 It is not required that the cost always equal 2 when FROM is the same as TO;
28592 on some machines it is expensive to move between registers if they are not
28593 general registers. */
28596 ix86_register_move_cost (enum machine_mode mode, reg_class_t class1_i,
28597 reg_class_t class2_i)
28599 enum reg_class class1 = (enum reg_class) class1_i;
28600 enum reg_class class2 = (enum reg_class) class2_i;
28602 /* In case we require secondary memory, compute cost of the store followed
28603 by load. In order to avoid bad register allocation choices, we need
28604 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
28606 if (inline_secondary_memory_needed (class1, class2, mode, 0))
28610 cost += inline_memory_move_cost (mode, class1, 2);
28611 cost += inline_memory_move_cost (mode, class2, 2);
28613 /* In the case of copying from a general purpose register we may emit
28614 multiple stores followed by a single load, causing a memory-size
28615 mismatch stall. Count this as an arbitrarily high cost of 20. */
28616 if (targetm.class_max_nregs (class1, mode)
28617 > targetm.class_max_nregs (class2, mode))
28620 /* In the case of FP/MMX moves, the registers actually overlap, and we
28621 have to switch modes in order to treat them differently. */
28622 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
28623 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
28629 /* Moves between SSE/MMX and integer unit are expensive. */
28630 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
28631 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
28633 /* ??? By keeping the returned value relatively high, we limit the number
28634 of moves between integer and MMX/SSE registers for all targets.
28635 Additionally, the high value prevents a problem with x86_modes_tieable_p(),
28636 where integer modes in MMX/SSE registers are not tieable
28637 because of missing QImode and HImode moves to, from, or between
28638 MMX/SSE registers. */
28639 return MAX (8, ix86_cost->mmxsse_to_integer);
28641 if (MAYBE_FLOAT_CLASS_P (class1))
28642 return ix86_cost->fp_move;
28643 if (MAYBE_SSE_CLASS_P (class1))
28644 return ix86_cost->sse_move;
28645 if (MAYBE_MMX_CLASS_P (class1))
28646 return ix86_cost->mmx_move;
28650 /* Return TRUE if hard register REGNO can hold a value of machine-mode
28654 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
28656 /* Flags, and only flags, can hold CCmode values. */
28657 if (CC_REGNO_P (regno))
28658 return GET_MODE_CLASS (mode) == MODE_CC;
28659 if (GET_MODE_CLASS (mode) == MODE_CC
28660 || GET_MODE_CLASS (mode) == MODE_RANDOM
28661 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
28663 if (FP_REGNO_P (regno))
28664 return VALID_FP_MODE_P (mode);
28665 if (SSE_REGNO_P (regno))
28667 /* We implement the move patterns for all vector modes into and
28668 out of SSE registers, even when no operation instructions
28669 are available. OImode move is available only when AVX is
28670 enabled. */
28671 return ((TARGET_AVX && mode == OImode)
28672 || VALID_AVX256_REG_MODE (mode)
28673 || VALID_SSE_REG_MODE (mode)
28674 || VALID_SSE2_REG_MODE (mode)
28675 || VALID_MMX_REG_MODE (mode)
28676 || VALID_MMX_REG_MODE_3DNOW (mode));
28678 if (MMX_REGNO_P (regno))
28680 /* We implement the move patterns for 3DNOW modes even in MMX mode,
28681 so if the register is available at all, then we can move data of
28682 the given mode into or out of it. */
28683 return (VALID_MMX_REG_MODE (mode)
28684 || VALID_MMX_REG_MODE_3DNOW (mode));
28687 if (mode == QImode)
28689 /* Take care for QImode values - they can be in non-QI regs,
28690 but then they do cause partial register stalls. */
28691 if (regno <= BX_REG || TARGET_64BIT)
28693 if (!TARGET_PARTIAL_REG_STALL)
28695 return !can_create_pseudo_p ();
28697 /* We handle both integer and floats in the general purpose registers. */
28698 else if (VALID_INT_MODE_P (mode))
28700 else if (VALID_FP_MODE_P (mode))
28702 else if (VALID_DFP_MODE_P (mode))
28704 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
28705 on to use that value in smaller contexts, this can easily force a
28706 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
28707 supporting DImode, allow it. */
28708 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
28714 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
28715 tieable integer mode. */
28718 ix86_tieable_integer_mode_p (enum machine_mode mode)
28727 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
28730 return TARGET_64BIT;
28737 /* Return true if MODE1 is accessible in a register that can hold MODE2
28738 without copying. That is, all register classes that can hold MODE2
28739 can also hold MODE1. */
28742 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
28744 if (mode1 == mode2)
28747 if (ix86_tieable_integer_mode_p (mode1)
28748 && ix86_tieable_integer_mode_p (mode2))
28751 /* MODE2 being XFmode implies fp stack or general regs, which means we
28752 can tie any smaller floating point modes to it. Note that we do not
28753 tie this with TFmode. */
28754 if (mode2 == XFmode)
28755 return mode1 == SFmode || mode1 == DFmode;
28757 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
28758 that we can tie it with SFmode. */
28759 if (mode2 == DFmode)
28760 return mode1 == SFmode;
28762 /* If MODE2 is only appropriate for an SSE register, then tie with
28763 any other mode acceptable to SSE registers. */
28764 if (GET_MODE_SIZE (mode2) == 16
28765 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
28766 return (GET_MODE_SIZE (mode1) == 16
28767 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
28769 /* If MODE2 is appropriate for an MMX register, then tie
28770 with any other mode acceptable to MMX registers. */
28771 if (GET_MODE_SIZE (mode2) == 8
28772 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
28773 return (GET_MODE_SIZE (mode1) == 8
28774 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
28779 /* Compute a (partial) cost for rtx X. Return true if the complete
28780 cost has been computed, and false if subexpressions should be
28781 scanned. In either case, *TOTAL contains the cost result. */
28784 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
28786 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
28787 enum machine_mode mode = GET_MODE (x);
28788 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
28796 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
28798 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
28800 else if (flag_pic && SYMBOLIC_CONST (x)
28802 || (GET_CODE (x) != LABEL_REF
28803 && (GET_CODE (x) != SYMBOL_REF
28804 || !SYMBOL_REF_LOCAL_P (x)))))
28811 if (mode == VOIDmode)
28814 switch (standard_80387_constant_p (x))
28819 default: /* Other constants */
28824 /* Start with (MEM (SYMBOL_REF)), since that's where
28825 it'll probably end up. Add a penalty for size. */
28826 *total = (COSTS_N_INSNS (1)
28827 + (flag_pic != 0 && !TARGET_64BIT)
28828 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
28834 /* The zero extension is often completely free on x86_64, so make
28835 it as cheap as possible. */
28836 if (TARGET_64BIT && mode == DImode
28837 && GET_MODE (XEXP (x, 0)) == SImode)
28839 else if (TARGET_ZERO_EXTEND_WITH_AND)
28840 *total = cost->add;
28842 *total = cost->movzx;
28846 *total = cost->movsx;
28850 if (CONST_INT_P (XEXP (x, 1))
28851 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
28853 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
28856 *total = cost->add;
28859 if ((value == 2 || value == 3)
28860 && cost->lea <= cost->shift_const)
28862 *total = cost->lea;
28872 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
28874 if (CONST_INT_P (XEXP (x, 1)))
28876 if (INTVAL (XEXP (x, 1)) > 32)
28877 *total = cost->shift_const + COSTS_N_INSNS (2);
28879 *total = cost->shift_const * 2;
28883 if (GET_CODE (XEXP (x, 1)) == AND)
28884 *total = cost->shift_var * 2;
28886 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
28891 if (CONST_INT_P (XEXP (x, 1)))
28892 *total = cost->shift_const;
28894 *total = cost->shift_var;
28902 gcc_assert (FLOAT_MODE_P (mode));
28903 gcc_assert (TARGET_FMA || TARGET_FMA4);
28905 /* ??? SSE scalar/vector cost should be used here. */
28906 /* ??? Bald assumption that fma has the same cost as fmul. */
28907 *total = cost->fmul;
28908 *total += rtx_cost (XEXP (x, 1), FMA, speed);
28910 /* Negate in op0 or op2 is free: FMS, FNMA, FNMS. */
28912 if (GET_CODE (sub) == NEG)
28913 sub = XEXP (sub, 0);
28914 *total += rtx_cost (sub, FMA, speed);
28917 if (GET_CODE (sub) == NEG)
28918 sub = XEXP (sub, 0);
28919 *total += rtx_cost (sub, FMA, speed);
28924 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
28926 /* ??? SSE scalar cost should be used here. */
28927 *total = cost->fmul;
28930 else if (X87_FLOAT_MODE_P (mode))
28932 *total = cost->fmul;
28935 else if (FLOAT_MODE_P (mode))
28937 /* ??? SSE vector cost should be used here. */
28938 *total = cost->fmul;
28943 rtx op0 = XEXP (x, 0);
28944 rtx op1 = XEXP (x, 1);
28946 if (CONST_INT_P (XEXP (x, 1)))
28948 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
28949 for (nbits = 0; value != 0; value &= value - 1)
28953 /* This is arbitrary. */
28956 /* Compute costs correctly for widening multiplication. */
28957 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
28958 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
28959 == GET_MODE_SIZE (mode))
28961 int is_mulwiden = 0;
28962 enum machine_mode inner_mode = GET_MODE (op0);
28964 if (GET_CODE (op0) == GET_CODE (op1))
28965 is_mulwiden = 1, op1 = XEXP (op1, 0);
28966 else if (CONST_INT_P (op1))
28968 if (GET_CODE (op0) == SIGN_EXTEND)
28969 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
28972 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
28976 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
28979 *total = (cost->mult_init[MODE_INDEX (mode)]
28980 + nbits * cost->mult_bit
28981 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
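/* Editorial example: (mult:DI (sign_extend:DI (reg:SI a))
   (sign_extend:DI (reg:SI b))) is a widening multiply, so the cost is
   computed for an SImode multiply of the inner operands instead of a
   full DImode one.  */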
28990 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
28991 /* ??? SSE cost should be used here. */
28992 *total = cost->fdiv;
28993 else if (X87_FLOAT_MODE_P (mode))
28994 *total = cost->fdiv;
28995 else if (FLOAT_MODE_P (mode))
28996 /* ??? SSE vector cost should be used here. */
28997 *total = cost->fdiv;
28999 *total = cost->divide[MODE_INDEX (mode)];
29003 if (GET_MODE_CLASS (mode) == MODE_INT
29004 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
29006 if (GET_CODE (XEXP (x, 0)) == PLUS
29007 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
29008 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
29009 && CONSTANT_P (XEXP (x, 1)))
29011 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
29012 if (val == 2 || val == 4 || val == 8)
29014 *total = cost->lea;
29015 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
29016 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
29017 outer_code, speed);
29018 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
29022 else if (GET_CODE (XEXP (x, 0)) == MULT
29023 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
29025 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
29026 if (val == 2 || val == 4 || val == 8)
29028 *total = cost->lea;
29029 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
29030 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
29034 else if (GET_CODE (XEXP (x, 0)) == PLUS)
29036 *total = cost->lea;
29037 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
29038 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
29039 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
29046 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29048 /* ??? SSE cost should be used here. */
29049 *total = cost->fadd;
29052 else if (X87_FLOAT_MODE_P (mode))
29054 *total = cost->fadd;
29057 else if (FLOAT_MODE_P (mode))
29059 /* ??? SSE vector cost should be used here. */
29060 *total = cost->fadd;
29068 if (!TARGET_64BIT && mode == DImode)
29070 *total = (cost->add * 2
29071 + (rtx_cost (XEXP (x, 0), outer_code, speed)
29072 << (GET_MODE (XEXP (x, 0)) != DImode))
29073 + (rtx_cost (XEXP (x, 1), outer_code, speed)
29074 << (GET_MODE (XEXP (x, 1)) != DImode)));
29080 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29082 /* ??? SSE cost should be used here. */
29083 *total = cost->fchs;
29086 else if (X87_FLOAT_MODE_P (mode))
29088 *total = cost->fchs;
29091 else if (FLOAT_MODE_P (mode))
29093 /* ??? SSE vector cost should be used here. */
29094 *total = cost->fchs;
29100 if (!TARGET_64BIT && mode == DImode)
29101 *total = cost->add * 2;
29103 *total = cost->add;
29107 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
29108 && XEXP (XEXP (x, 0), 1) == const1_rtx
29109 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
29110 && XEXP (x, 1) == const0_rtx)
29112 /* This kind of construct is implemented using test[bwl].
29113 Treat it as if we had an AND. */
29114 *total = (cost->add
29115 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
29116 + rtx_cost (const1_rtx, outer_code, speed));
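/* Editorial example: a single-bit test such as (x >> 5) & 1, i.e.
   (compare (zero_extract x (const_int 1) (const_int 5)) (const_int 0)),
   is emitted as "testl $32, x", hence the AND-like cost above.  */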
29122 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
29127 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29128 /* ??? SSE cost should be used here. */
29129 *total = cost->fabs;
29130 else if (X87_FLOAT_MODE_P (mode))
29131 *total = cost->fabs;
29132 else if (FLOAT_MODE_P (mode))
29133 /* ??? SSE vector cost should be used here. */
29134 *total = cost->fabs;
29138 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
29139 /* ??? SSE cost should be used here. */
29140 *total = cost->fsqrt;
29141 else if (X87_FLOAT_MODE_P (mode))
29142 *total = cost->fsqrt;
29143 else if (FLOAT_MODE_P (mode))
29144 /* ??? SSE vector cost should be used here. */
29145 *total = cost->fsqrt;
29149 if (XINT (x, 1) == UNSPEC_TP)
29156 case VEC_DUPLICATE:
29157 /* ??? Assume all of these vector manipulation patterns are
29158 recognizable. In which case they all pretty much have the
29159 same cost. */
29160 *total = COSTS_N_INSNS (1);
29170 static int current_machopic_label_num;
29172 /* Given a symbol name and its associated stub, write out the
29173 definition of the stub. */
29176 machopic_output_stub (FILE *file, const char *symb, const char *stub)
29178 unsigned int length;
29179 char *binder_name, *symbol_name, lazy_ptr_name[32];
29180 int label = ++current_machopic_label_num;
29182 /* For 64-bit we shouldn't get here. */
29183 gcc_assert (!TARGET_64BIT);
29185 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
29186 symb = targetm.strip_name_encoding (symb);
29188 length = strlen (stub);
29189 binder_name = XALLOCAVEC (char, length + 32);
29190 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
29192 length = strlen (symb);
29193 symbol_name = XALLOCAVEC (char, length + 32);
29194 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
29196 sprintf (lazy_ptr_name, "L%d$lz", label);
29198 if (MACHOPIC_ATT_STUB)
29199 switch_to_section (darwin_sections[machopic_picsymbol_stub3_section]);
29200 else if (MACHOPIC_PURE)
29201 switch_to_section (darwin_sections[machopic_picsymbol_stub2_section]);
29203 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
29205 fprintf (file, "%s:\n", stub);
29206 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29208 if (MACHOPIC_ATT_STUB)
29210 fprintf (file, "\thlt ; hlt ; hlt ; hlt ; hlt\n");
29212 else if (MACHOPIC_PURE)
29215 /* 25-byte PIC stub using "CALL get_pc_thunk". */
29216 rtx tmp = gen_rtx_REG (SImode, 2 /* ECX */);
29217 output_set_got (tmp, NULL_RTX); /* "CALL ___<cpu>.get_pc_thunk.cx". */
29218 fprintf (file, "LPC$%d:\tmovl\t%s-LPC$%d(%%ecx),%%ecx\n",
29219 label, lazy_ptr_name, label);
29220 fprintf (file, "\tjmp\t*%%ecx\n");
29223 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
29225 /* The AT&T-style ("self-modifying") stub is not lazily bound, thus
29226 it needs no stub-binding-helper. */
29227 if (MACHOPIC_ATT_STUB)
29230 fprintf (file, "%s:\n", binder_name);
29234 fprintf (file, "\tlea\t%s-%s(%%ecx),%%ecx\n", lazy_ptr_name, binder_name);
29235 fprintf (file, "\tpushl\t%%ecx\n");
29238 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
29240 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
29242 /* N.B. Keep the correspondence of these
29243 'symbol_ptr/symbol_ptr2/symbol_ptr3' sections consistent with the
29244 old-pic/new-pic/non-pic stubs; altering this will break
29245 compatibility with existing dylibs. */
29248 /* 25-byte PIC stub using "CALL get_pc_thunk". */
29249 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr2_section]);
29252 /* 16-byte -mdynamic-no-pic stub. */
29253 switch_to_section(darwin_sections[machopic_lazy_symbol_ptr3_section]);
29255 fprintf (file, "%s:\n", lazy_ptr_name);
29256 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29257 fprintf (file, ASM_LONG "%s\n", binder_name);
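/* Editorial sketch pieced together from the fprintf calls above; for a
   MACHOPIC_PURE stub for _foo the emitted text is roughly (label
   numbers illustrative, the call line comes from output_set_got):

       L_foo$stub:
               .indirect_symbol _foo
               call ___i686.get_pc_thunk.cx
       LPC$1:  movl  L1$lz-LPC$1(%ecx),%ecx
               jmp   *%ecx
   */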
29259 #endif /* TARGET_MACHO */
29261 /* Order the registers for the register allocator. */
29264 x86_order_regs_for_local_alloc (void)
29269 /* First allocate the local general purpose registers. */
29270 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
29271 if (GENERAL_REGNO_P (i) && call_used_regs[i])
29272 reg_alloc_order [pos++] = i;
29274 /* Global general purpose registers. */
29275 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
29276 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
29277 reg_alloc_order [pos++] = i;
29279 /* x87 registers come first in case we are doing FP math
29280 using them. */
29281 if (!TARGET_SSE_MATH)
29282 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29283 reg_alloc_order [pos++] = i;
29285 /* SSE registers. */
29286 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
29287 reg_alloc_order [pos++] = i;
29288 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
29289 reg_alloc_order [pos++] = i;
29291 /* x87 registers. */
29292 if (TARGET_SSE_MATH)
29293 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
29294 reg_alloc_order [pos++] = i;
29296 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
29297 reg_alloc_order [pos++] = i;
29299 /* Initialize the rest of array as we do not allocate some registers
29300 at all. */
29301 while (pos < FIRST_PSEUDO_REGISTER)
29302 reg_alloc_order [pos++] = 0;
29305 /* Handle a "callee_pop_aggregate_return" attribute; arguments as
29306 in struct attribute_spec.handler. */
29308 ix86_handle_callee_pop_aggregate_return (tree *node, tree name,
29310 int flags ATTRIBUTE_UNUSED,
29311 bool *no_add_attrs)
29313 if (TREE_CODE (*node) != FUNCTION_TYPE
29314 && TREE_CODE (*node) != METHOD_TYPE
29315 && TREE_CODE (*node) != FIELD_DECL
29316 && TREE_CODE (*node) != TYPE_DECL)
29318 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29320 *no_add_attrs = true;
29325 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
29327 *no_add_attrs = true;
29330 if (is_attribute_p ("callee_pop_aggregate_return", name))
29334 cst = TREE_VALUE (args);
29335 if (TREE_CODE (cst) != INTEGER_CST)
29337 warning (OPT_Wattributes,
29338 "%qE attribute requires an integer constant argument",
29340 *no_add_attrs = true;
29342 else if (compare_tree_int (cst, 0) != 0
29343 && compare_tree_int (cst, 1) != 0)
29345 warning (OPT_Wattributes,
29346 "argument to %qE attribute is neither zero, nor one",
29348 *no_add_attrs = true;
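/* Editorial usage sketch: the attribute takes a literal 0 or 1, e.g.

       struct big f (void)
         __attribute__ ((callee_pop_aggregate_return (1)));

   forces (1) or suppresses (0) the callee popping the hidden
   aggregate-return pointer slot on 32-bit targets.  */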
29357 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
29358 struct attribute_spec.handler. */
29360 ix86_handle_abi_attribute (tree *node, tree name,
29361 tree args ATTRIBUTE_UNUSED,
29362 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29364 if (TREE_CODE (*node) != FUNCTION_TYPE
29365 && TREE_CODE (*node) != METHOD_TYPE
29366 && TREE_CODE (*node) != FIELD_DECL
29367 && TREE_CODE (*node) != TYPE_DECL)
29369 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29371 *no_add_attrs = true;
29375 /* Can combine regparm with all attributes but fastcall. */
29376 if (is_attribute_p ("ms_abi", name))
29378 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
29380 error ("ms_abi and sysv_abi attributes are not compatible");
29385 else if (is_attribute_p ("sysv_abi", name))
29387 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
29389 error ("ms_abi and sysv_abi attributes are not compatible");
29398 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
29399 struct attribute_spec.handler. */
29401 ix86_handle_struct_attribute (tree *node, tree name,
29402 tree args ATTRIBUTE_UNUSED,
29403 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29406 if (DECL_P (*node))
29408 if (TREE_CODE (*node) == TYPE_DECL)
29409 type = &TREE_TYPE (*node);
29414 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
29415 || TREE_CODE (*type) == UNION_TYPE)))
29417 warning (OPT_Wattributes, "%qE attribute ignored",
29419 *no_add_attrs = true;
29422 else if ((is_attribute_p ("ms_struct", name)
29423 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
29424 || ((is_attribute_p ("gcc_struct", name)
29425 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
29427 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
29429 *no_add_attrs = true;
29436 ix86_handle_fndecl_attribute (tree *node, tree name,
29437 tree args ATTRIBUTE_UNUSED,
29438 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29440 if (TREE_CODE (*node) != FUNCTION_DECL)
29442 warning (OPT_Wattributes, "%qE attribute only applies to functions",
29444 *no_add_attrs = true;
29450 ix86_ms_bitfield_layout_p (const_tree record_type)
29452 return ((TARGET_MS_BITFIELD_LAYOUT
29453 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
29454 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
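/* Editorial usage sketch:

       struct __attribute__ ((ms_struct)) S { char c : 3; int i : 4; };

   opts a single record into the MS bitfield layout, and gcc_struct
   opts one out when -mms-bitfields makes MS layout the default.  */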
29457 /* Returns an expression indicating where the this parameter is
29458 located on entry to the FUNCTION. */
29461 x86_this_parameter (tree function)
29463 tree type = TREE_TYPE (function);
29464 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
29469 const int *parm_regs;
29471 if (ix86_function_type_abi (type) == MS_ABI)
29472 parm_regs = x86_64_ms_abi_int_parameter_registers;
29474 parm_regs = x86_64_int_parameter_registers;
29475 return gen_rtx_REG (DImode, parm_regs[aggr]);
29478 nregs = ix86_function_regparm (type, function);
29480 if (nregs > 0 && !stdarg_p (type))
29483 unsigned int ccvt = ix86_get_callcvt (type);
29485 if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
29486 regno = aggr ? DX_REG : CX_REG;
29487 else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
29491 return gen_rtx_MEM (SImode,
29492 plus_constant (stack_pointer_rtx, 4));
29501 return gen_rtx_MEM (SImode,
29502 plus_constant (stack_pointer_rtx, 4));
29505 return gen_rtx_REG (SImode, regno);
29508 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
29511 /* Determine whether x86_output_mi_thunk can succeed. */
29514 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
29515 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
29516 HOST_WIDE_INT vcall_offset, const_tree function)
29518 /* 64-bit can handle anything. */
29522 /* For 32-bit, everything's fine if we have one free register. */
29523 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
29526 /* Need a free register for vcall_offset. */
29530 /* Need a free register for GOT references. */
29531 if (flag_pic && !targetm.binds_local_p (function))
29534 /* Otherwise ok. */
29538 /* Output the assembler code for a thunk function. THUNK_DECL is the
29539 declaration for the thunk function itself, FUNCTION is the decl for
29540 the target function. DELTA is an immediate constant offset to be
29541 added to THIS. If VCALL_OFFSET is nonzero, the word at
29542 *(*this + vcall_offset) should be added to THIS. */
29545 x86_output_mi_thunk (FILE *file,
29546 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
29547 HOST_WIDE_INT vcall_offset, tree function)
29549 rtx this_param = x86_this_parameter (function);
29550 rtx this_reg, tmp, fnaddr;
29552 emit_note (NOTE_INSN_PROLOGUE_END);
29554 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
29555 pull it in now and let DELTA benefit. */
29556 if (REG_P (this_param))
29557 this_reg = this_param;
29558 else if (vcall_offset)
29560 /* Put the this parameter into %eax. */
29561 this_reg = gen_rtx_REG (Pmode, AX_REG);
29562 emit_move_insn (this_reg, this_param);
29565 this_reg = NULL_RTX;
29567 /* Adjust the this parameter by a fixed constant. */
29570 rtx delta_rtx = GEN_INT (delta);
29571 rtx delta_dst = this_reg ? this_reg : this_param;
29575 if (!x86_64_general_operand (delta_rtx, Pmode))
29577 tmp = gen_rtx_REG (Pmode, R10_REG);
29578 emit_move_insn (tmp, delta_rtx);
29583 emit_insn (ix86_gen_add3 (delta_dst, delta_dst, delta_rtx));
29586 /* Adjust the this parameter by a value stored in the vtable. */
29589 rtx vcall_addr, vcall_mem, this_mem;
29590 unsigned int tmp_regno;
29593 tmp_regno = R10_REG;
29596 unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (function));
29597 if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) != 0)
29598 tmp_regno = AX_REG;
29600 tmp_regno = CX_REG;
29602 tmp = gen_rtx_REG (Pmode, tmp_regno);
29604 this_mem = gen_rtx_MEM (ptr_mode, this_reg);
29605 if (Pmode != ptr_mode)
29606 this_mem = gen_rtx_ZERO_EXTEND (Pmode, this_mem);
29607 emit_move_insn (tmp, this_mem);
29609 /* Adjust the this parameter. */
29610 vcall_addr = plus_constant (tmp, vcall_offset);
29612 && !ix86_legitimate_address_p (ptr_mode, vcall_addr, true))
29614 rtx tmp2 = gen_rtx_REG (Pmode, R11_REG);
29615 emit_move_insn (tmp2, GEN_INT (vcall_offset));
29616 vcall_addr = gen_rtx_PLUS (Pmode, tmp, tmp2);
29619 vcall_mem = gen_rtx_MEM (ptr_mode, vcall_addr);
29620 if (Pmode != ptr_mode)
29621 emit_insn (gen_addsi_1_zext (this_reg,
29622 gen_rtx_REG (ptr_mode,
29626 emit_insn (ix86_gen_add3 (this_reg, this_reg, vcall_mem));
29629 /* If necessary, drop THIS back to its stack slot. */
29630 if (this_reg && this_reg != this_param)
29631 emit_move_insn (this_param, this_reg);
29633 fnaddr = XEXP (DECL_RTL (function), 0);
29636 if (!flag_pic || targetm.binds_local_p (function)
29637 || cfun->machine->call_abi == MS_ABI)
29641 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, fnaddr), UNSPEC_GOTPCREL);
29642 tmp = gen_rtx_CONST (Pmode, tmp);
29643 fnaddr = gen_rtx_MEM (Pmode, tmp);
29648 if (!flag_pic || targetm.binds_local_p (function))
29651 else if (TARGET_MACHO)
29653 fnaddr = machopic_indirect_call_target (DECL_RTL (function));
29654 fnaddr = XEXP (fnaddr, 0);
29656 #endif /* TARGET_MACHO */
29659 tmp = gen_rtx_REG (Pmode, CX_REG);
29660 output_set_got (tmp, NULL_RTX);
29662 fnaddr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, fnaddr), UNSPEC_GOT);
29663 fnaddr = gen_rtx_PLUS (Pmode, fnaddr, tmp);
29664 fnaddr = gen_rtx_MEM (Pmode, fnaddr);
29668 /* Our sibling call patterns do not allow memories, because we have no
29669 predicate that can distinguish between frame and non-frame memory.
29670 For our purposes here, we can get away with (ab)using a jump pattern,
29671 because we're going to do no optimization. */
29672 if (MEM_P (fnaddr))
29673 emit_jump_insn (gen_indirect_jump (fnaddr));
29676 tmp = gen_rtx_MEM (QImode, fnaddr);
29677 tmp = gen_rtx_CALL (VOIDmode, tmp, const0_rtx);
29678 tmp = emit_call_insn (tmp);
29679 SIBLING_CALL_P (tmp) = 1;
29683 /* Emit just enough of rest_of_compilation to get the insns emitted.
29684 Note that use_thunk calls assemble_start_function et al. */
29685 tmp = get_insns ();
29686 insn_locators_alloc ();
29687 shorten_branches (tmp);
29688 final_start_function (tmp, file, 1);
29689 final (tmp, file, 1);
29690 final_end_function ();
29694 x86_file_start (void)
29696 default_file_start ();
29698 darwin_file_start ();
29700 if (X86_FILE_START_VERSION_DIRECTIVE)
29701 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
29702 if (X86_FILE_START_FLTUSED)
29703 fputs ("\t.global\t__fltused\n", asm_out_file);
29704 if (ix86_asm_dialect == ASM_INTEL)
29705 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
29709 x86_field_alignment (tree field, int computed)
29711 enum machine_mode mode;
29712 tree type = TREE_TYPE (field);
29714 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
29716 mode = TYPE_MODE (strip_array_types (type));
29717 if (mode == DFmode || mode == DCmode
29718 || GET_MODE_CLASS (mode) == MODE_INT
29719 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
29720 return MIN (32, computed);
29724 /* Output assembler code to FILE to increment profiler label # LABELNO
29725 for profiling a function entry. */
29727 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
29729 const char *mcount_name = (flag_fentry ? MCOUNT_NAME_BEFORE_PROLOGUE
29734 #ifndef NO_PROFILE_COUNTERS
29735 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
29738 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
29739 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", mcount_name);
29741 fprintf (file, "\tcall\t%s\n", mcount_name);
29745 #ifndef NO_PROFILE_COUNTERS
29746 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
29749 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", mcount_name);
29753 #ifndef NO_PROFILE_COUNTERS
29754 fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
29757 fprintf (file, "\tcall\t%s\n", mcount_name);
29761 /* We don't have exact information about the insn sizes, but we may assume
29762 quite safely that we are informed about all 1 byte insns and memory
29763 address sizes. This is enough to eliminate unnecessary padding in
29767 min_insn_size (rtx insn)
29771 if (!INSN_P (insn) || !active_insn_p (insn))
29774 /* Discard alignments we've emitted, and jump table data.  */
29775 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
29776 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
29778 if (JUMP_TABLE_DATA_P (insn))
29781 /* Important case - calls are always 5 bytes.
29782 It is common to have many calls in a row.
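/* A direct near call is opcode 0xe8 followed by a 4-byte relative
   displacement, hence exactly 5 bytes.  */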
29784 && symbolic_reference_mentioned_p (PATTERN (insn))
29785 && !SIBLING_CALL_P (insn))
29787 len = get_attr_length (insn);
29791 /* For normal instructions we rely on get_attr_length being exact,
29792 with a few exceptions. */
29793 if (!JUMP_P (insn))
29795 enum attr_type type = get_attr_type (insn);
29800 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
29801 || asm_noperands (PATTERN (insn)) >= 0)
29808 /* Otherwise trust get_attr_length. */
29812 l = get_attr_length_address (insn);
29813 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
29822 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
29824 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
29828 ix86_avoid_jump_mispredicts (void)
29830 rtx insn, start = get_insns ();
29831 int nbytes = 0, njumps = 0;
29834 /* Look for all minimal intervals of instructions containing 4 jumps.
29835 The intervals are bounded by START and INSN. NBYTES is the total
29836 size of the instructions in the interval, including INSN but not
29837 including START. When NBYTES is smaller than 16 bytes, it is possible
29838 that START and INSN end up within the same 16-byte window.
29840 The smallest offset in the window at which INSN can start occurs when
29841 START ends at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
29842 We add a p2align to the 16-byte window with maxskip 15 - NBYTES + sizeof (INSN).
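/* Worked example (illustrative): if an interval of 4 jumps has
   NBYTES == 12 and the last insn is 2 bytes, that insn can begin as
   early as offset 10 of a window, so a p2align with maxskip
   15 - 12 + 2 == 5 ensures the 4 jumps cannot share one 16-byte
   window.  */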
29844 for (insn = start; insn; insn = NEXT_INSN (insn))
29848 if (LABEL_P (insn))
29850 int align = label_to_alignment (insn);
29851 int max_skip = label_to_max_skip (insn);
29855 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
29856 already in the current 16 byte page, because otherwise
29857 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
29858 bytes to reach 16 byte boundary. */
29860 || (align <= 3 && max_skip != (1 << align) - 1))
29863 fprintf (dump_file, "Label %i with max_skip %i\n",
29864 INSN_UID (insn), max_skip);
29867 while (nbytes + max_skip >= 16)
29869 start = NEXT_INSN (start);
29870 if ((JUMP_P (start)
29871 && GET_CODE (PATTERN (start)) != ADDR_VEC
29872 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
29874 njumps--, isjump = 1;
29877 nbytes -= min_insn_size (start);
29883 min_size = min_insn_size (insn);
29884 nbytes += min_size;
29886 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
29887 INSN_UID (insn), min_size);
29889 && GET_CODE (PATTERN (insn)) != ADDR_VEC
29890 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
29898 start = NEXT_INSN (start);
29899 if ((JUMP_P (start)
29900 && GET_CODE (PATTERN (start)) != ADDR_VEC
29901 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
29903 njumps--, isjump = 1;
29906 nbytes -= min_insn_size (start);
29908 gcc_assert (njumps >= 0);
29910 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
29911 INSN_UID (start), INSN_UID (insn), nbytes);
29913 if (njumps == 3 && isjump && nbytes < 16)
29915 int padsize = 15 - nbytes + min_insn_size (insn);
29918 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
29919 INSN_UID (insn), padsize);
29920 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
29926 /* AMD Athlon works faster
29927 when RET is not the destination of a conditional jump and is not directly
29928 preceded by another jump instruction. We avoid the penalty by inserting a NOP just
29929 before the RET instructions in such cases. */
29931 ix86_pad_returns (void)
29936 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
29938 basic_block bb = e->src;
29939 rtx ret = BB_END (bb);
29941 bool replace = false;
29943 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
29944 || optimize_bb_for_size_p (bb))
29946 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
29947 if (active_insn_p (prev) || LABEL_P (prev))
29949 if (prev && LABEL_P (prev))
29954 FOR_EACH_EDGE (e, ei, bb->preds)
29955 if (EDGE_FREQUENCY (e) && e->src->index >= 0
29956 && !(e->flags & EDGE_FALLTHRU))
29961 prev = prev_active_insn (ret);
29963 && ((JUMP_P (prev) && any_condjump_p (prev))
29966 /* Empty functions get a branch mispredict even when
29967 the jump destination is not visible to us. */
29968 if (!prev && !optimize_function_for_size_p (cfun))
29973 emit_jump_insn_before (gen_return_internal_long (), ret);
29979 /* Count the minimum number of instructions in BB. Return 4 if the
29980 number of instructions >= 4. */
29983 ix86_count_insn_bb (basic_block bb)
29986 int insn_count = 0;
29988 /* Count number of instructions in this block. Return 4 if the number
29989 of instructions >= 4. */
29990 FOR_BB_INSNS (bb, insn)
29992 /* This can only happen in exit blocks.  */
29994 && GET_CODE (PATTERN (insn)) == RETURN)
29997 if (NONDEBUG_INSN_P (insn)
29998 && GET_CODE (PATTERN (insn)) != USE
29999 && GET_CODE (PATTERN (insn)) != CLOBBER)
30002 if (insn_count >= 4)
30011 /* Count the minimum number of instructions in code path in BB.
30012 Return 4 if the number of instructions >= 4. */
30015 ix86_count_insn (basic_block bb)
30019 int min_prev_count;
30021 /* Only bother counting instructions along paths with no
30022 more than 2 basic blocks between entry and exit. Given
30023 that BB has an edge to exit, determine if a predecessor
30024 of BB has an edge from entry. If so, compute the number
30025 of instructions in the predecessor block. If there
30026 happen to be multiple such blocks, compute the minimum. */
30027 min_prev_count = 4;
30028 FOR_EACH_EDGE (e, ei, bb->preds)
30031 edge_iterator prev_ei;
30033 if (e->src == ENTRY_BLOCK_PTR)
30035 min_prev_count = 0;
30038 FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
30040 if (prev_e->src == ENTRY_BLOCK_PTR)
30042 int count = ix86_count_insn_bb (e->src);
30043 if (count < min_prev_count)
30044 min_prev_count = count;
30050 if (min_prev_count < 4)
30051 min_prev_count += ix86_count_insn_bb (bb);
30053 return min_prev_count;
30056 /* Pad short function to 4 instructions. */
30059 ix86_pad_short_function (void)
30064 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
30066 rtx ret = BB_END (e->src);
30067 if (JUMP_P (ret) && GET_CODE (PATTERN (ret)) == RETURN)
30069 int insn_count = ix86_count_insn (e->src);
30071 /* Pad short function. */
30072 if (insn_count < 4)
30076 /* Find epilogue. */
30079 || NOTE_KIND (insn) != NOTE_INSN_EPILOGUE_BEG))
30080 insn = PREV_INSN (insn);
30085 /* Two NOPs count as one instruction. */
30086 insn_count = 2 * (4 - insn_count);
30087 emit_insn_before (gen_nops (GEN_INT (insn_count)), insn);
30093 /* Implement machine specific optimizations. We implement padding of returns
30094 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
30098 /* We are freeing block_for_insn in the toplev to keep compatibility
30099 with old MDEP_REORGS that are not CFG based. Recompute it now. */
30100 compute_bb_for_insn ();
30102 /* Run the vzeroupper optimization if needed. */
30103 if (TARGET_VZEROUPPER)
30104 move_or_delete_vzeroupper ();
30106 if (optimize && optimize_function_for_speed_p (cfun))
30108 if (TARGET_PAD_SHORT_FUNCTION)
30109 ix86_pad_short_function ();
30110 else if (TARGET_PAD_RETURNS)
30111 ix86_pad_returns ();
30112 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
30113 if (TARGET_FOUR_JUMP_LIMIT)
30114 ix86_avoid_jump_mispredicts ();
30119 /* Return nonzero when QImode register that must be represented via REX prefix
30122 x86_extended_QIreg_mentioned_p (rtx insn)
30125 extract_insn_cached (insn);
30126 for (i = 0; i < recog_data.n_operands; i++)
30127 if (REG_P (recog_data.operand[i])
30128 && REGNO (recog_data.operand[i]) > BX_REG)
30133 /* Return nonzero when P points to register encoded via REX prefix.
30134 Called via for_each_rtx. */
30136 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
30138 unsigned int regno;
30141 regno = REGNO (*p);
30142 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
30145 /* Return true when INSN mentions register that must be encoded using REX
30148 x86_extended_reg_mentioned_p (rtx insn)
30150 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
30151 extended_reg_mentioned_1, NULL);
30154 /* If profitable, negate (without causing overflow) integer constant
30155 of mode MODE at location LOC. Return true in this case. */
30157 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
30161 if (!CONST_INT_P (*loc))
30167 /* DImode x86_64 constants must fit in 32 bits. */
30168 gcc_assert (x86_64_immediate_operand (*loc, mode));
30179 gcc_unreachable ();
30182 /* Avoid overflows. */
30183 if (mode_signbit_p (mode, *loc))
30186 val = INTVAL (*loc);
30188 /* Make things pretty: use `subl $4,%eax' rather than `addl $-4,%eax'.
30189 Exception: -128 encodes smaller than 128, so swap the sign and the operation. */
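/* Example (illustrative): `cmpl $128, %eax' needs a 4-byte immediate,
   while `cmpl $-128, %eax' fits the sign-extended 1-byte immediate
   range [-128, 127], so 128 is negated here but -128 never is.  */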
30190 if ((val < 0 && val != -128)
30193 *loc = GEN_INT (-val);
30200 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
30201 optabs would emit if we didn't have TFmode patterns. */
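/* A C sketch of the expansion below (illustrative only).  For
   negative (i.e. large unsigned) inputs, IN is halved with the
   discarded low bit kept as a sticky rounding bit, converted, and
   then doubled:

	if ((signed) in >= 0)
	  out = (FLOAT) in;
	else
	  {
	    in2 = (in >> 1) | (in & 1);
	    out = (FLOAT) in2;
	    out = out + out;
	  }  */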
30204 x86_emit_floatuns (rtx operands[2])
30206 rtx neglab, donelab, i0, i1, f0, in, out;
30207 enum machine_mode mode, inmode;
30209 inmode = GET_MODE (operands[1]);
30210 gcc_assert (inmode == SImode || inmode == DImode);
30213 in = force_reg (inmode, operands[1]);
30214 mode = GET_MODE (out);
30215 neglab = gen_label_rtx ();
30216 donelab = gen_label_rtx ();
30217 f0 = gen_reg_rtx (mode);
30219 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
30221 expand_float (out, in, 0);
30223 emit_jump_insn (gen_jump (donelab));
30226 emit_label (neglab);
30228 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
30230 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
30232 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
30234 expand_float (f0, i0, 0);
30236 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
30238 emit_label (donelab);
30241 /* AVX does not support 32-byte integer vector operations,
30242 thus the longest vector we are faced with is V16QImode. */
30243 #define MAX_VECT_LEN 16
30245 struct expand_vec_perm_d
30247 rtx target, op0, op1;
30248 unsigned char perm[MAX_VECT_LEN];
30249 enum machine_mode vmode;
30250 unsigned char nelt;
30254 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
30255 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
30257 /* Get a vector mode of the same size as the original but with elements
30258 twice as wide. This is only guaranteed to apply to integral vectors. */
30260 static inline enum machine_mode
30261 get_mode_wider_vector (enum machine_mode o)
30263 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
30264 enum machine_mode n = GET_MODE_WIDER_MODE (o);
30265 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
30266 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
30270 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30271 with all elements equal to VAR. Return true if successful. */
30274 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
30275 rtx target, rtx val)
30298 /* First attempt to recognize VAL as-is. */
30299 dup = gen_rtx_VEC_DUPLICATE (mode, val);
30300 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
30301 if (recog_memoized (insn) < 0)
30304 /* If that fails, force VAL into a register. */
30307 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
30308 seq = get_insns ();
30311 emit_insn_before (seq, insn);
30313 ok = recog_memoized (insn) >= 0;
30322 if (TARGET_SSE || TARGET_3DNOW_A)
30326 val = gen_lowpart (SImode, val);
30327 x = gen_rtx_TRUNCATE (HImode, val);
30328 x = gen_rtx_VEC_DUPLICATE (mode, x);
30329 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30342 struct expand_vec_perm_d dperm;
30346 memset (&dperm, 0, sizeof (dperm));
30347 dperm.target = target;
30348 dperm.vmode = mode;
30349 dperm.nelt = GET_MODE_NUNITS (mode);
30350 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
30352 /* Extend to SImode using a paradoxical SUBREG. */
30353 tmp1 = gen_reg_rtx (SImode);
30354 emit_move_insn (tmp1, gen_lowpart (SImode, val));
30356 /* Insert the SImode value as low element of a V4SImode vector. */
30357 tmp2 = gen_lowpart (V4SImode, dperm.op0);
30358 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
30360 ok = (expand_vec_perm_1 (&dperm)
30361 || expand_vec_perm_broadcast_1 (&dperm));
30373 /* Replicate the value once into the next wider mode and recurse. */
30375 enum machine_mode smode, wsmode, wvmode;
30378 smode = GET_MODE_INNER (mode);
30379 wvmode = get_mode_wider_vector (mode);
30380 wsmode = GET_MODE_INNER (wvmode);
30382 val = convert_modes (wsmode, smode, val, true);
30383 x = expand_simple_binop (wsmode, ASHIFT, val,
30384 GEN_INT (GET_MODE_BITSIZE (smode)),
30385 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30386 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
30388 x = gen_lowpart (wvmode, target);
30389 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
30397 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
30398 rtx x = gen_reg_rtx (hvmode);
30400 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
30403 x = gen_rtx_VEC_CONCAT (mode, x, x);
30404 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30413 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30414 whose ONE_VAR element is VAR, and other elements are zero. Return true
30418 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
30419 rtx target, rtx var, int one_var)
30421 enum machine_mode vsimode;
30424 bool use_vector_set = false;
30429 /* For SSE4.1, we normally use vector set. But if the second
30430 element is zero and inter-unit moves are OK, we use movq
30432 use_vector_set = (TARGET_64BIT
30434 && !(TARGET_INTER_UNIT_MOVES
30440 use_vector_set = TARGET_SSE4_1;
30443 use_vector_set = TARGET_SSE2;
30446 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
30453 use_vector_set = TARGET_AVX;
30456 /* Use ix86_expand_vector_set in 64bit mode only. */
30457 use_vector_set = TARGET_AVX && TARGET_64BIT;
30463 if (use_vector_set)
30465 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
30466 var = force_reg (GET_MODE_INNER (mode), var);
30467 ix86_expand_vector_set (mmx_ok, target, var, one_var);
30483 var = force_reg (GET_MODE_INNER (mode), var);
30484 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
30485 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30490 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
30491 new_target = gen_reg_rtx (mode);
30493 new_target = target;
30494 var = force_reg (GET_MODE_INNER (mode), var);
30495 x = gen_rtx_VEC_DUPLICATE (mode, var);
30496 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
30497 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
30500 /* We need to shuffle the value to the correct position, so
30501 create a new pseudo to store the intermediate result. */
30503 /* With SSE2, we can use the integer shuffle insns. */
30504 if (mode != V4SFmode && TARGET_SSE2)
30506 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
30508 GEN_INT (one_var == 1 ? 0 : 1),
30509 GEN_INT (one_var == 2 ? 0 : 1),
30510 GEN_INT (one_var == 3 ? 0 : 1)));
30511 if (target != new_target)
30512 emit_move_insn (target, new_target);
30516 /* Otherwise convert the intermediate result to V4SFmode and
30517 use the SSE1 shuffle instructions. */
30518 if (mode != V4SFmode)
30520 tmp = gen_reg_rtx (V4SFmode);
30521 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
30526 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
30528 GEN_INT (one_var == 1 ? 0 : 1),
30529 GEN_INT (one_var == 2 ? 0+4 : 1+4),
30530 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
30532 if (mode != V4SFmode)
30533 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
30534 else if (tmp != target)
30535 emit_move_insn (target, tmp);
30537 else if (target != new_target)
30538 emit_move_insn (target, new_target);
30543 vsimode = V4SImode;
30549 vsimode = V2SImode;
30555 /* Zero extend the variable element to SImode and recurse. */
30556 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
30558 x = gen_reg_rtx (vsimode);
30559 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
30561 gcc_unreachable ();
30563 emit_move_insn (target, gen_lowpart (mode, x));
30571 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
30572 consisting of the values in VALS. It is known that all elements
30573 except ONE_VAR are constants. Return true if successful. */
30576 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
30577 rtx target, rtx vals, int one_var)
30579 rtx var = XVECEXP (vals, 0, one_var);
30580 enum machine_mode wmode;
30583 const_vec = copy_rtx (vals);
30584 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
30585 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
30593 /* For the two element vectors, it's just as easy to use
30594 the general case. */
30598 /* Use ix86_expand_vector_set in 64bit mode only. */
30621 /* There's no way to set one QImode entry easily. Combine
30622 the variable value with its adjacent constant value, and
30623 promote to an HImode set. */
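/* E.g. (illustrative): to set byte 5 of a V16QImode vector, merge it
   with constant byte 4 into one HImode value and set element 2
   (one_var >> 1) of the V8HImode view instead.  */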
30624 x = XVECEXP (vals, 0, one_var ^ 1);
30627 var = convert_modes (HImode, QImode, var, true);
30628 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
30629 NULL_RTX, 1, OPTAB_LIB_WIDEN);
30630 x = GEN_INT (INTVAL (x) & 0xff);
30634 var = convert_modes (HImode, QImode, var, true);
30635 x = gen_int_mode (INTVAL (x) << 8, HImode);
30637 if (x != const0_rtx)
30638 var = expand_simple_binop (HImode, IOR, var, x, var,
30639 1, OPTAB_LIB_WIDEN);
30641 x = gen_reg_rtx (wmode);
30642 emit_move_insn (x, gen_lowpart (wmode, const_vec));
30643 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
30645 emit_move_insn (target, gen_lowpart (mode, x));
30652 emit_move_insn (target, const_vec);
30653 ix86_expand_vector_set (mmx_ok, target, var, one_var);
30657 /* A subroutine of ix86_expand_vector_init_general. Use vector
30658 concatenate to handle the most general case: all values variable,
30659 and none identical. */
30662 ix86_expand_vector_init_concat (enum machine_mode mode,
30663 rtx target, rtx *ops, int n)
30665 enum machine_mode cmode, hmode = VOIDmode;
30666 rtx first[8], second[4];
30706 gcc_unreachable ();
30709 if (!register_operand (ops[1], cmode))
30710 ops[1] = force_reg (cmode, ops[1]);
30711 if (!register_operand (ops[0], cmode))
30712 ops[0] = force_reg (cmode, ops[0]);
30713 emit_insn (gen_rtx_SET (VOIDmode, target,
30714 gen_rtx_VEC_CONCAT (mode, ops[0],
30734 gcc_unreachable ();
30750 gcc_unreachable ();
30755 /* FIXME: We process inputs backward to help RA. PR 36222. */
30758 for (; i > 0; i -= 2, j--)
30760 first[j] = gen_reg_rtx (cmode);
30761 v = gen_rtvec (2, ops[i - 1], ops[i]);
30762 ix86_expand_vector_init (false, first[j],
30763 gen_rtx_PARALLEL (cmode, v));
30769 gcc_assert (hmode != VOIDmode);
30770 for (i = j = 0; i < n; i += 2, j++)
30772 second[j] = gen_reg_rtx (hmode);
30773 ix86_expand_vector_init_concat (hmode, second [j],
30777 ix86_expand_vector_init_concat (mode, target, second, n);
30780 ix86_expand_vector_init_concat (mode, target, first, n);
30784 gcc_unreachable ();
30788 /* A subroutine of ix86_expand_vector_init_general. Use vector
30789 interleave to handle the most general case: all values variable,
30790 and none identical. */
30793 ix86_expand_vector_init_interleave (enum machine_mode mode,
30794 rtx target, rtx *ops, int n)
30796 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
30799 rtx (*gen_load_even) (rtx, rtx, rtx);
30800 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
30801 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
30806 gen_load_even = gen_vec_setv8hi;
30807 gen_interleave_first_low = gen_vec_interleave_lowv4si;
30808 gen_interleave_second_low = gen_vec_interleave_lowv2di;
30809 inner_mode = HImode;
30810 first_imode = V4SImode;
30811 second_imode = V2DImode;
30812 third_imode = VOIDmode;
30815 gen_load_even = gen_vec_setv16qi;
30816 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
30817 gen_interleave_second_low = gen_vec_interleave_lowv4si;
30818 inner_mode = QImode;
30819 first_imode = V8HImode;
30820 second_imode = V4SImode;
30821 third_imode = V2DImode;
30824 gcc_unreachable ();
30827 for (i = 0; i < n; i++)
30829 /* Extend the odd element to SImode using a paradoxical SUBREG. */
30830 op0 = gen_reg_rtx (SImode);
30831 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
30833 /* Insert the SImode value as low element of V4SImode vector. */
30834 op1 = gen_reg_rtx (V4SImode);
30835 op0 = gen_rtx_VEC_MERGE (V4SImode,
30836 gen_rtx_VEC_DUPLICATE (V4SImode,
30838 CONST0_RTX (V4SImode),
30840 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
30842 /* Cast the V4SImode vector back to a vector in the original mode. */
30843 op0 = gen_reg_rtx (mode);
30844 emit_move_insn (op0, gen_lowpart (mode, op1));
30846 /* Load even elements into the second position. */
30847 emit_insn (gen_load_even (op0,
30848 force_reg (inner_mode,
30852 /* Cast vector to FIRST_IMODE vector. */
30853 ops[i] = gen_reg_rtx (first_imode);
30854 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
30857 /* Interleave low FIRST_IMODE vectors. */
30858 for (i = j = 0; i < n; i += 2, j++)
30860 op0 = gen_reg_rtx (first_imode);
30861 emit_insn (gen_interleave_first_low (op0, ops[i], ops[i + 1]));
30863 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
30864 ops[j] = gen_reg_rtx (second_imode);
30865 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
30868 /* Interleave low SECOND_IMODE vectors. */
30869 switch (second_imode)
30872 for (i = j = 0; i < n / 2; i += 2, j++)
30874 op0 = gen_reg_rtx (second_imode);
30875 emit_insn (gen_interleave_second_low (op0, ops[i],
30878 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
30880 ops[j] = gen_reg_rtx (third_imode);
30881 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
30883 second_imode = V2DImode;
30884 gen_interleave_second_low = gen_vec_interleave_lowv2di;
30888 op0 = gen_reg_rtx (second_imode);
30889 emit_insn (gen_interleave_second_low (op0, ops[0],
30892 /* Cast the SECOND_IMODE vector back to a vector in the original
30894 emit_insn (gen_rtx_SET (VOIDmode, target,
30895 gen_lowpart (mode, op0)));
30899 gcc_unreachable ();
30903 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
30904 all values variable, and none identical. */
30907 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
30908 rtx target, rtx vals)
30910 rtx ops[32], op0, op1;
30911 enum machine_mode half_mode = VOIDmode;
30918 if (!mmx_ok && !TARGET_SSE)
30930 n = GET_MODE_NUNITS (mode);
30931 for (i = 0; i < n; i++)
30932 ops[i] = XVECEXP (vals, 0, i);
30933 ix86_expand_vector_init_concat (mode, target, ops, n);
30937 half_mode = V16QImode;
30941 half_mode = V8HImode;
30945 n = GET_MODE_NUNITS (mode);
30946 for (i = 0; i < n; i++)
30947 ops[i] = XVECEXP (vals, 0, i);
30948 op0 = gen_reg_rtx (half_mode);
30949 op1 = gen_reg_rtx (half_mode);
30950 ix86_expand_vector_init_interleave (half_mode, op0, ops,
30952 ix86_expand_vector_init_interleave (half_mode, op1,
30953 &ops [n >> 1], n >> 2);
30954 emit_insn (gen_rtx_SET (VOIDmode, target,
30955 gen_rtx_VEC_CONCAT (mode, op0, op1)));
30959 if (!TARGET_SSE4_1)
30967 /* Don't use ix86_expand_vector_init_interleave if we can't
30968 move from GPR to SSE register directly. */
30969 if (!TARGET_INTER_UNIT_MOVES)
30972 n = GET_MODE_NUNITS (mode);
30973 for (i = 0; i < n; i++)
30974 ops[i] = XVECEXP (vals, 0, i);
30975 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
30983 gcc_unreachable ();
30987 int i, j, n_elts, n_words, n_elt_per_word;
30988 enum machine_mode inner_mode;
30989 rtx words[4], shift;
30991 inner_mode = GET_MODE_INNER (mode);
30992 n_elts = GET_MODE_NUNITS (mode);
30993 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
30994 n_elt_per_word = n_elts / n_words;
30995 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
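/* Illustrative: for V8HImode on a 32-bit target this packs two HImode
   elements per SImode word, word = (hi << 16) | lo, and then builds
   the vector from the resulting n_words == 4 words below.  */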
30997 for (i = 0; i < n_words; ++i)
30999 rtx word = NULL_RTX;
31001 for (j = 0; j < n_elt_per_word; ++j)
31003 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
31004 elt = convert_modes (word_mode, inner_mode, elt, true);
31010 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
31011 word, 1, OPTAB_LIB_WIDEN);
31012 word = expand_simple_binop (word_mode, IOR, word, elt,
31013 word, 1, OPTAB_LIB_WIDEN);
31021 emit_move_insn (target, gen_lowpart (mode, words[0]));
31022 else if (n_words == 2)
31024 rtx tmp = gen_reg_rtx (mode);
31025 emit_clobber (tmp);
31026 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
31027 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
31028 emit_move_insn (target, tmp);
31030 else if (n_words == 4)
31032 rtx tmp = gen_reg_rtx (V4SImode);
31033 gcc_assert (word_mode == SImode);
31034 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
31035 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
31036 emit_move_insn (target, gen_lowpart (mode, tmp));
31039 gcc_unreachable ();
31043 /* Initialize vector TARGET via VALS. Suppress the use of MMX
31044 instructions unless MMX_OK is true. */
31047 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
31049 enum machine_mode mode = GET_MODE (target);
31050 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31051 int n_elts = GET_MODE_NUNITS (mode);
31052 int n_var = 0, one_var = -1;
31053 bool all_same = true, all_const_zero = true;
31057 for (i = 0; i < n_elts; ++i)
31059 x = XVECEXP (vals, 0, i);
31060 if (!(CONST_INT_P (x)
31061 || GET_CODE (x) == CONST_DOUBLE
31062 || GET_CODE (x) == CONST_FIXED))
31063 n_var++, one_var = i;
31064 else if (x != CONST0_RTX (inner_mode))
31065 all_const_zero = false;
31066 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
31070 /* Constants are best loaded from the constant pool. */
31073 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
31077 /* If all values are identical, broadcast the value. */
31079 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
31080 XVECEXP (vals, 0, 0)))
31083 /* Values where only one field is non-constant are best loaded from
31084 the pool and overwritten via move later. */
31088 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
31089 XVECEXP (vals, 0, one_var),
31093 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
31097 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
31101 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
31103 enum machine_mode mode = GET_MODE (target);
31104 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31105 enum machine_mode half_mode;
31106 bool use_vec_merge = false;
31108 static rtx (*gen_extract[6][2]) (rtx, rtx)
31110 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
31111 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
31112 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
31113 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
31114 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
31115 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
31117 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
31119 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
31120 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
31121 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
31122 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
31123 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
31124 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
31134 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
31135 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
31137 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
31139 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
31140 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31146 use_vec_merge = TARGET_SSE4_1 && TARGET_64BIT;
31150 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
31151 ix86_expand_vector_extract (false, tmp, target, 1 - elt);
31153 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
31155 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
31156 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31163 /* For the two element vectors, we implement a VEC_CONCAT with
31164 the extraction of the other element. */
31166 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
31167 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
31170 op0 = val, op1 = tmp;
31172 op0 = tmp, op1 = val;
31174 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
31175 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31180 use_vec_merge = TARGET_SSE4_1;
31187 use_vec_merge = true;
31191 /* tmp = target = A B C D */
31192 tmp = copy_to_reg (target);
31193 /* target = A A B B */
31194 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
31195 /* target = X A B B */
31196 ix86_expand_vector_set (false, target, val, 0);
31197 /* target = A X C D */
31198 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31199 const1_rtx, const0_rtx,
31200 GEN_INT (2+4), GEN_INT (3+4)));
31204 /* tmp = target = A B C D */
31205 tmp = copy_to_reg (target);
31206 /* tmp = X B C D */
31207 ix86_expand_vector_set (false, tmp, val, 0);
31208 /* target = A B X D */
31209 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31210 const0_rtx, const1_rtx,
31211 GEN_INT (0+4), GEN_INT (3+4)));
31215 /* tmp = target = A B C D */
31216 tmp = copy_to_reg (target);
31217 /* tmp = X B C D */
31218 ix86_expand_vector_set (false, tmp, val, 0);
31219 /* target = A B X D */
31220 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
31221 const0_rtx, const1_rtx,
31222 GEN_INT (2+4), GEN_INT (0+4)));
31226 gcc_unreachable ();
31231 use_vec_merge = TARGET_SSE4_1;
31235 /* Element 0 handled by vec_merge below. */
31238 use_vec_merge = true;
31244 /* With SSE2, use integer shuffles to swap element 0 and ELT,
31245 store into element 0, then shuffle them back. */
31249 order[0] = GEN_INT (elt);
31250 order[1] = const1_rtx;
31251 order[2] = const2_rtx;
31252 order[3] = GEN_INT (3);
31253 order[elt] = const0_rtx;
31255 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
31256 order[1], order[2], order[3]));
31258 ix86_expand_vector_set (false, target, val, 0);
31260 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
31261 order[1], order[2], order[3]));
31265 /* For SSE1, we have to reuse the V4SF code. */
31266 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
31267 gen_lowpart (SFmode, val), elt);
31272 use_vec_merge = TARGET_SSE2;
31275 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
31279 use_vec_merge = TARGET_SSE4_1;
31286 half_mode = V16QImode;
31292 half_mode = V8HImode;
31298 half_mode = V4SImode;
31304 half_mode = V2DImode;
31310 half_mode = V4SFmode;
31316 half_mode = V2DFmode;
31322 /* Compute offset. */
31326 gcc_assert (i <= 1);
31328 /* Extract the half. */
31329 tmp = gen_reg_rtx (half_mode);
31330 emit_insn (gen_extract[j][i] (tmp, target));
31332 /* Put val in tmp at elt. */
31333 ix86_expand_vector_set (false, tmp, val, elt);
31336 emit_insn (gen_insert[j][i] (target, target, tmp));
31345 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
31346 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
31347 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31351 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31353 emit_move_insn (mem, target);
31355 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31356 emit_move_insn (tmp, val);
31358 emit_move_insn (target, mem);
31363 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
31365 enum machine_mode mode = GET_MODE (vec);
31366 enum machine_mode inner_mode = GET_MODE_INNER (mode);
31367 bool use_vec_extr = false;
31380 use_vec_extr = true;
31384 use_vec_extr = TARGET_SSE4_1;
31396 tmp = gen_reg_rtx (mode);
31397 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
31398 GEN_INT (elt), GEN_INT (elt),
31399 GEN_INT (elt+4), GEN_INT (elt+4)));
31403 tmp = gen_reg_rtx (mode);
31404 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
31408 gcc_unreachable ();
31411 use_vec_extr = true;
31416 use_vec_extr = TARGET_SSE4_1;
31430 tmp = gen_reg_rtx (mode);
31431 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
31432 GEN_INT (elt), GEN_INT (elt),
31433 GEN_INT (elt), GEN_INT (elt)));
31437 tmp = gen_reg_rtx (mode);
31438 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
31442 gcc_unreachable ();
31445 use_vec_extr = true;
31450 /* For SSE1, we have to reuse the V4SF code. */
31451 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
31452 gen_lowpart (V4SFmode, vec), elt);
31458 use_vec_extr = TARGET_SSE2;
31461 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
31465 use_vec_extr = TARGET_SSE4_1;
31469 /* ??? Could extract the appropriate HImode element and shift. */
31476 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
31477 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
31479 /* Let the rtl optimizers know about the zero extension performed. */
31480 if (inner_mode == QImode || inner_mode == HImode)
31482 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
31483 target = gen_lowpart (SImode, target);
31486 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
31490 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
31492 emit_move_insn (mem, vec);
31494 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
31495 emit_move_insn (target, tmp);
31499 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
31500 pattern to reduce; DEST is the destination; IN is the input vector. */
31503 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
31505 rtx tmp1, tmp2, tmp3;
31507 tmp1 = gen_reg_rtx (V4SFmode);
31508 tmp2 = gen_reg_rtx (V4SFmode);
31509 tmp3 = gen_reg_rtx (V4SFmode);
31511 emit_insn (gen_sse_movhlps (tmp1, in, in));
31512 emit_insn (fn (tmp2, tmp1, in));
31514 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
31515 const1_rtx, const1_rtx,
31516 GEN_INT (1+4), GEN_INT (1+4)));
31517 emit_insn (fn (dest, tmp2, tmp3));
31520 /* Target hook for scalar_mode_supported_p. */
31522 ix86_scalar_mode_supported_p (enum machine_mode mode)
31524 if (DECIMAL_FLOAT_MODE_P (mode))
31525 return default_decimal_float_supported_p ();
31526 else if (mode == TFmode)
31529 return default_scalar_mode_supported_p (mode);
31532 /* Implements target hook vector_mode_supported_p. */
31534 ix86_vector_mode_supported_p (enum machine_mode mode)
31536 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
31538 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
31540 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
31542 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
31544 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
31549 /* Target hook for c_mode_for_suffix. */
31550 static enum machine_mode
31551 ix86_c_mode_for_suffix (char suffix)
31561 /* Worker function for TARGET_MD_ASM_CLOBBERS.
31563 We do this in the new i386 backend to maintain source compatibility
31564 with the old cc0-based compiler. */
31567 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
31568 tree inputs ATTRIBUTE_UNUSED,
31571 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
31573 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
31578 /* Implements target vector targetm.asm.encode_section_info. */
31580 static void ATTRIBUTE_UNUSED
31581 ix86_encode_section_info (tree decl, rtx rtl, int first)
31583 default_encode_section_info (decl, rtl, first);
31585 if (TREE_CODE (decl) == VAR_DECL
31586 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
31587 && ix86_in_large_data_p (decl))
31588 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
31591 /* Worker function for REVERSE_CONDITION. */
31594 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
31596 return (mode != CCFPmode && mode != CCFPUmode
31597 ? reverse_condition (code)
31598 : reverse_condition_maybe_unordered (code));
31601 /* Output code to perform an x87 FP register move, from OPERANDS[1]
31605 output_387_reg_move (rtx insn, rtx *operands)
31607 if (REG_P (operands[0]))
31609 if (REG_P (operands[1])
31610 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31612 if (REGNO (operands[0]) == FIRST_STACK_REG)
31613 return output_387_ffreep (operands, 0);
31614 return "fstp\t%y0";
31616 if (STACK_TOP_P (operands[0]))
31617 return "fld%Z1\t%y1";
31620 else if (MEM_P (operands[0]))
31622 gcc_assert (REG_P (operands[1]));
31623 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
31624 return "fstp%Z0\t%y0";
31627 /* There is no non-popping store to memory for XFmode.
31628 So if we need one, follow the store with a load. */
31629 if (GET_MODE (operands[0]) == XFmode)
31630 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
31632 return "fst%Z0\t%y0";
31639 /* Output code to perform a conditional jump to LABEL, if C2 flag in
31640 FP status register is set. */
31643 ix86_emit_fp_unordered_jump (rtx label)
31645 rtx reg = gen_reg_rtx (HImode);
31648 emit_insn (gen_x86_fnstsw_1 (reg));
31650 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
31652 emit_insn (gen_x86_sahf_1 (reg));
31654 temp = gen_rtx_REG (CCmode, FLAGS_REG);
31655 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
31659 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
31661 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
31662 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
31665 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
31666 gen_rtx_LABEL_REF (VOIDmode, label),
31668 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
31670 emit_jump_insn (temp);
31671 predict_jump (REG_BR_PROB_BASE * 10 / 100);
31674 /* Output code to perform a log1p XFmode calculation. */
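/* The magic constant below is 1 - sqrt(2)/2 ~= 0.2928932...: the x87
   fyl2xp1 instruction is only specified for |x| in that range, where
   computing y * log2 (1 + x) directly keeps log1p accurate near zero.
   Conceptually (illustrative): for small |x| the result is
   ln(2) * log2 (1 + x) via fyl2xp1; otherwise it is the same quantity
   via an explicit 1 + x and fyl2x, with fldln2 supplying ln(2).  */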
31676 void ix86_emit_i387_log1p (rtx op0, rtx op1)
31678 rtx label1 = gen_label_rtx ();
31679 rtx label2 = gen_label_rtx ();
31681 rtx tmp = gen_reg_rtx (XFmode);
31682 rtx tmp2 = gen_reg_rtx (XFmode);
31685 emit_insn (gen_absxf2 (tmp, op1));
31686 test = gen_rtx_GE (VOIDmode, tmp,
31687 CONST_DOUBLE_FROM_REAL_VALUE (
31688 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
31690 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
31692 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
31693 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
31694 emit_jump (label2);
31696 emit_label (label1);
31697 emit_move_insn (tmp, CONST1_RTX (XFmode));
31698 emit_insn (gen_addxf3 (tmp, op1, tmp));
31699 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
31700 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
31702 emit_label (label2);
31705 /* Output code to perform a Newton-Raphson approximation of a single precision
31706 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
31708 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
31710 rtx x0, x1, e0, e1;
31712 x0 = gen_reg_rtx (mode);
31713 e0 = gen_reg_rtx (mode);
31714 e1 = gen_reg_rtx (mode);
31715 x1 = gen_reg_rtx (mode);
31717 /* a / b = a * ((rcp(b) + rcp(b)) - (b * rcp(b) * rcp (b))) */
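/* This is one Newton-Raphson step for f (x) = 1/x - b starting from
   the rcpss estimate x0: x1 = x0 * (2 - b * x0), rewritten here as
   (x0 + x0) - b * x0 * x0.  */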
31719 /* x0 = rcp(b) estimate */
31720 emit_insn (gen_rtx_SET (VOIDmode, x0,
31721 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
31724 emit_insn (gen_rtx_SET (VOIDmode, e0,
31725 gen_rtx_MULT (mode, x0, b)));
31728 emit_insn (gen_rtx_SET (VOIDmode, e0,
31729 gen_rtx_MULT (mode, x0, e0)));
31732 emit_insn (gen_rtx_SET (VOIDmode, e1,
31733 gen_rtx_PLUS (mode, x0, x0)));
31736 emit_insn (gen_rtx_SET (VOIDmode, x1,
31737 gen_rtx_MINUS (mode, e1, e0)));
31740 emit_insn (gen_rtx_SET (VOIDmode, res,
31741 gen_rtx_MULT (mode, a, x1)));
31744 /* Output code to perform a Newton-Raphson approximation of a
31745 single precision floating point [reciprocal] square root. */
31747 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
31750 rtx x0, e0, e1, e2, e3, mthree, mhalf;
31753 x0 = gen_reg_rtx (mode);
31754 e0 = gen_reg_rtx (mode);
31755 e1 = gen_reg_rtx (mode);
31756 e2 = gen_reg_rtx (mode);
31757 e3 = gen_reg_rtx (mode);
31759 real_from_integer (&r, VOIDmode, -3, -1, 0);
31760 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
31762 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
31763 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
31765 if (VECTOR_MODE_P (mode))
31767 mthree = ix86_build_const_vector (mode, true, mthree);
31768 mhalf = ix86_build_const_vector (mode, true, mhalf);
31771 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
31772 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
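/* One Newton-Raphson step for f (x) = 1/x**2 - a starting from the
   rsqrtss estimate x0: x1 = 0.5 * x0 * (3 - a * x0 * x0), computed
   here as -0.5 * x0 * (a * x0 * x0 - 3) so that the constants -3 and
   -0.5 can be used directly.  */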
31774 /* x0 = rsqrt(a) estimate */
31775 emit_insn (gen_rtx_SET (VOIDmode, x0,
31776 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
31779 /* If a == 0.0, filter out the infinity to prevent NaN for sqrt (0.0). */
31784 zero = gen_reg_rtx (mode);
31785 mask = gen_reg_rtx (mode);
31787 zero = force_reg (mode, CONST0_RTX(mode));
31788 emit_insn (gen_rtx_SET (VOIDmode, mask,
31789 gen_rtx_NE (mode, zero, a)));
31791 emit_insn (gen_rtx_SET (VOIDmode, x0,
31792 gen_rtx_AND (mode, x0, mask)));
31796 emit_insn (gen_rtx_SET (VOIDmode, e0,
31797 gen_rtx_MULT (mode, x0, a)));
31799 emit_insn (gen_rtx_SET (VOIDmode, e1,
31800 gen_rtx_MULT (mode, e0, x0)));
31803 mthree = force_reg (mode, mthree);
31804 emit_insn (gen_rtx_SET (VOIDmode, e2,
31805 gen_rtx_PLUS (mode, e1, mthree)));
31807 mhalf = force_reg (mode, mhalf);
31809 /* e3 = -.5 * x0 */
31810 emit_insn (gen_rtx_SET (VOIDmode, e3,
31811 gen_rtx_MULT (mode, x0, mhalf)));
31813 /* e3 = -.5 * e0 */
31814 emit_insn (gen_rtx_SET (VOIDmode, e3,
31815 gen_rtx_MULT (mode, e0, mhalf)));
31816 /* ret = e2 * e3 */
31817 emit_insn (gen_rtx_SET (VOIDmode, res,
31818 gen_rtx_MULT (mode, e2, e3)));
31821 #ifdef TARGET_SOLARIS
31822 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
31825 i386_solaris_elf_named_section (const char *name, unsigned int flags,
31828 /* With Binutils 2.15, the "@unwind" marker must be specified on
31829 every occurrence of the ".eh_frame" section, not just the first
31832 && strcmp (name, ".eh_frame") == 0)
31834 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
31835 flags & SECTION_WRITE ? "aw" : "a");
31840 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
31842 solaris_elf_asm_comdat_section (name, flags, decl);
31847 default_elf_asm_named_section (name, flags, decl);
31849 #endif /* TARGET_SOLARIS */
31851 /* Return the mangling of TYPE if it is an extended fundamental type. */
31853 static const char *
31854 ix86_mangle_type (const_tree type)
31856 type = TYPE_MAIN_VARIANT (type);
31858 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
31859 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
31862 switch (TYPE_MODE (type))
31865 /* __float128 is "g". */
31868 /* "long double" or __float80 is "e". */
31875 /* For 32-bit code we can save PIC register setup by using
31876 __stack_chk_fail_local hidden function instead of calling
31877 __stack_chk_fail directly. 64-bit code doesn't need to setup any PIC
31878 register, so it is better to call __stack_chk_fail directly. */
31880 static tree ATTRIBUTE_UNUSED
31881 ix86_stack_protect_fail (void)
31883 return TARGET_64BIT
31884 ? default_external_stack_protect_fail ()
31885 : default_hidden_stack_protect_fail ();
31888 /* Select a format to encode pointers in exception handling data. CODE
31889 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
31890 true if the symbol may be affected by dynamic relocations.
31892 ??? All x86 object file formats are capable of representing this.
31893 After all, the relocation needed is the same as for the call insn.
31894 Whether or not a particular assembler allows us to enter such, I
31895 guess we'll have to see. */
31897 asm_preferred_eh_data_format (int code, int global)
31901 int type = DW_EH_PE_sdata8;
31903 || ix86_cmodel == CM_SMALL_PIC
31904 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
31905 type = DW_EH_PE_sdata4;
31906 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
31908 if (ix86_cmodel == CM_SMALL
31909 || (ix86_cmodel == CM_MEDIUM && code))
31910 return DW_EH_PE_udata4;
31911 return DW_EH_PE_absptr;
31914 /* Expand copysign from SIGN to the positive value ABS_VALUE
31915 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
31918 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
31920 enum machine_mode mode = GET_MODE (sign);
31921 rtx sgn = gen_reg_rtx (mode);
31922 if (mask == NULL_RTX)
31924 enum machine_mode vmode;
31926 if (mode == SFmode)
31928 else if (mode == DFmode)
31933 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), false);
31934 if (!VECTOR_MODE_P (mode))
31936 /* We need to generate a scalar mode mask in this case. */
31937 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
31938 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
31939 mask = gen_reg_rtx (mode);
31940 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
31944 mask = gen_rtx_NOT (mode, mask);
31945 emit_insn (gen_rtx_SET (VOIDmode, sgn,
31946 gen_rtx_AND (mode, mask, sign)));
31947 emit_insn (gen_rtx_SET (VOIDmode, result,
31948 gen_rtx_IOR (mode, abs_value, sgn)));
31951 /* Expand fabs (OP0) and return a new rtx that holds the result. The
31952 mask for masking out the sign-bit is stored in *SMASK, if that is
31955 ix86_expand_sse_fabs (rtx op0, rtx *smask)
31957 enum machine_mode vmode, mode = GET_MODE (op0);
31960 xa = gen_reg_rtx (mode);
31961 if (mode == SFmode)
31963 else if (mode == DFmode)
31967 mask = ix86_build_signbit_mask (vmode, VECTOR_MODE_P (mode), true);
31968 if (!VECTOR_MODE_P (mode))
31970 /* We need to generate a scalar mode mask in this case. */
31971 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
31972 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
31973 mask = gen_reg_rtx (mode);
31974 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
31976 emit_insn (gen_rtx_SET (VOIDmode, xa,
31977 gen_rtx_AND (mode, op0, mask)));
31985 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
31986 swapping the operands if SWAP_OPERANDS is true. The expanded
31987 code is a forward jump to a newly created label in case the
31988 comparison is true. The generated label rtx is returned. */
31990 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
31991 bool swap_operands)
32002 label = gen_label_rtx ();
32003 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
32004 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32005 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
32006 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
32007 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
32008 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
32009 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
32010 JUMP_LABEL (tmp) = label;
32015 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
32016 using comparison code CODE. Operands are swapped for the comparison if
32017 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
32019 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
32020 bool swap_operands)
32022 rtx (*insn)(rtx, rtx, rtx, rtx);
32023 enum machine_mode mode = GET_MODE (op0);
32024 rtx mask = gen_reg_rtx (mode);
32033 insn = mode == DFmode ? gen_setcc_df_sse : gen_setcc_sf_sse;
32035 emit_insn (insn (mask, op0, op1,
32036 gen_rtx_fmt_ee (code, mode, op0, op1)));
32040 /* Generate and return a rtx of mode MODE for 2**n where n is the number
32041 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
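/* For DFmode this is 2**52, for SFmode 2**23.  Adding and then
   subtracting this constant rounds a smaller-magnitude value to an
   integer in the current rounding mode; e.g. (illustrative)
   3.7 + 2**52 rounds to 4503599627370500.0, and subtracting 2**52
   leaves 4.0.  */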
32043 ix86_gen_TWO52 (enum machine_mode mode)
32045 REAL_VALUE_TYPE TWO52r;
32048 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
32049 TWO52 = const_double_from_real_value (TWO52r, mode);
32050 TWO52 = force_reg (mode, TWO52);
32055 /* Expand SSE sequence for computing lround from OP1 storing
32058 ix86_expand_lround (rtx op0, rtx op1)
32060 /* C code for the stuff we're doing below:
32061 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
32064 enum machine_mode mode = GET_MODE (op1);
32065 const struct real_format *fmt;
32066 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
32069 /* load nextafter (0.5, 0.0) */
32070 fmt = REAL_MODE_FORMAT (mode);
32071 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
32072 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
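/* Using 0.5 itself would be wrong for inputs just below 0.5: e.g. the
   largest double smaller than 0.5 plus 0.5 rounds up to 1.0, which
   would then convert to 1 instead of 0.  The predecessor of 0.5
   avoids that.  */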
32074 /* adj = copysign (0.5, op1) */
32075 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
32076 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
32078 /* adj = op1 + adj */
32079 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
32081 /* op0 = (imode)adj */
32082 expand_fix (op0, adj, 0);
32085 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
32088 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
32090 /* C code for the stuff we're doing below (for do_floor):
32092 xi -= (double)xi > op1 ? 1 : 0;
32095 enum machine_mode fmode = GET_MODE (op1);
32096 enum machine_mode imode = GET_MODE (op0);
32097 rtx ireg, freg, label, tmp;
32099 /* reg = (long)op1 */
32100 ireg = gen_reg_rtx (imode);
32101 expand_fix (ireg, op1, 0);
32103 /* freg = (double)reg */
32104 freg = gen_reg_rtx (fmode);
32105 expand_float (freg, ireg, 0);
32107 /* ireg = (freg > op1) ? ireg - 1 : ireg */
32108 label = ix86_expand_sse_compare_and_jump (UNLE,
32109 freg, op1, !do_floor);
32110 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
32111 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
32112 emit_move_insn (ireg, tmp);
32114 emit_label (label);
32115 LABEL_NUSES (label) = 1;
32117 emit_move_insn (op0, ireg);
32120 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
32121 result in OPERAND0. */
32123 ix86_expand_rint (rtx operand0, rtx operand1)
32125 /* C code for the stuff we're doing below:
32126 xa = fabs (operand1);
32127 if (!isless (xa, 2**52))
32129 xa = xa + 2**52 - 2**52;
32130 return copysign (xa, operand1);
32132 enum machine_mode mode = GET_MODE (operand0);
32133 rtx res, xa, label, TWO52, mask;
32135 res = gen_reg_rtx (mode);
32136 emit_move_insn (res, operand1);
32138 /* xa = abs (operand1) */
32139 xa = ix86_expand_sse_fabs (res, &mask);
32141 /* if (!isless (xa, TWO52)) goto label; */
32142 TWO52 = ix86_gen_TWO52 (mode);
32143 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32145 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32146 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
32148 ix86_sse_copysign_to_positive (res, xa, res, mask);
32150 emit_label (label);
32151 LABEL_NUSES (label) = 1;
32153 emit_move_insn (operand0, res);
32156 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
32159 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
32161 /* C code for the stuff we expand below.
32162 double xa = fabs (x), x2;
32163 if (!isless (xa, TWO52))
32165 xa = xa + TWO52 - TWO52;
32166 x2 = copysign (xa, x);
32175 enum machine_mode mode = GET_MODE (operand0);
32176 rtx xa, TWO52, tmp, label, one, res, mask;
32178 TWO52 = ix86_gen_TWO52 (mode);
32180 /* Temporary for holding the result, initialized to the input
32181 operand to ease control flow. */
32182 res = gen_reg_rtx (mode);
32183 emit_move_insn (res, operand1);
32185 /* xa = abs (operand1) */
32186 xa = ix86_expand_sse_fabs (res, &mask);
32188 /* if (!isless (xa, TWO52)) goto label; */
32189 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32191 /* xa = xa + TWO52 - TWO52; */
32192 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32193 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
32195 /* xa = copysign (xa, operand1) */
32196 ix86_sse_copysign_to_positive (xa, xa, res, mask);
32198 /* generate 1.0 or -1.0 */
32199 one = force_reg (mode,
32200 const_double_from_real_value (do_floor
32201 ? dconst1 : dconstm1, mode));
32203 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
32204 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
32205 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32206 gen_rtx_AND (mode, one, tmp)));
32207 /* We always need to subtract here to preserve signed zero. */
32208 tmp = expand_simple_binop (mode, MINUS,
32209 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32210 emit_move_insn (res, tmp);
32212 emit_label (label);
32213 LABEL_NUSES (label) = 1;
32215 emit_move_insn (operand0, res);
32218 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
32221 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
32223 /* C code for the stuff we expand below.
32224 double xa = fabs (x), x2;
32225 if (!isless (xa, TWO52))
32227 x2 = (double)(long)x;
32234 if (HONOR_SIGNED_ZEROS (mode))
32235 return copysign (x2, x);
32238 enum machine_mode mode = GET_MODE (operand0);
32239 rtx xa, xi, TWO52, tmp, label, one, res, mask;
32241 TWO52 = ix86_gen_TWO52 (mode);
32243 /* Temporary for holding the result, initialized to the input
32244 operand to ease control flow. */
32245 res = gen_reg_rtx (mode);
32246 emit_move_insn (res, operand1);
32248 /* xa = abs (operand1) */
32249 xa = ix86_expand_sse_fabs (res, &mask);
32251 /* if (!isless (xa, TWO52)) goto label; */
32252 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32254 /* xa = (double)(long)x */
32255 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32256 expand_fix (xi, res, 0);
32257 expand_float (xa, xi, 0);
32260 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
32262 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
32263 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
32264 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32265 gen_rtx_AND (mode, one, tmp)));
32266 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
32267 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32268 emit_move_insn (res, tmp);
32270 if (HONOR_SIGNED_ZEROS (mode))
32271 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
32273 emit_label (label);
32274 LABEL_NUSES (label) = 1;
32276 emit_move_insn (operand0, res);
/* Expand SSE sequence for computing round from OPERAND1 storing
   into OPERAND0.  This is a sequence that works without relying on
   DImode truncation via cvttsd2siq, which is only available on 64-bit
   targets.  */
32283 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
/* C code for the stuff we expand below.
	double xa = fabs (x), xa2, x2;
	if (!isless (xa, TWO52))
	  return x;
   Using the absolute value and copying back sign makes
   -0.0 -> -0.0 correct.
	xa2 = xa + TWO52 - TWO52;
   Compensate.
	dxa = xa2 - xa;
	if (dxa <= -0.5)
	  xa2 += 1;
	else if (dxa > 0.5)
	  xa2 -= 1;
	x2 = copysign (xa2, x);
	return x2;
 */
32301 enum machine_mode mode = GET_MODE (operand0);
32302 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
32304 TWO52 = ix86_gen_TWO52 (mode);
32306 /* Temporary for holding the result, initialized to the input
32307 operand to ease control flow. */
32308 res = gen_reg_rtx (mode);
32309 emit_move_insn (res, operand1);
32311 /* xa = abs (operand1) */
32312 xa = ix86_expand_sse_fabs (res, &mask);
32314 /* if (!isless (xa, TWO52)) goto label; */
32315 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32317 /* xa2 = xa + TWO52 - TWO52; */
32318 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32319 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
32321 /* dxa = xa2 - xa; */
32322 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
32324 /* generate 0.5, 1.0 and -0.5 */
32325 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
32326 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
32327 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
32331 tmp = gen_reg_rtx (mode);
32332 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
32333 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
32334 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32335 gen_rtx_AND (mode, one, tmp)));
32336 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
32337 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
32338 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
32339 emit_insn (gen_rtx_SET (VOIDmode, tmp,
32340 gen_rtx_AND (mode, one, tmp)));
32341 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
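/* Scalar model of this correction (illustrative): the TWO52 trick gives
   xa2 = xa rounded to nearest-even, so dxa = xa2 - xa lies in
   [-0.5, 0.5].  Rounding half away from zero differs only at the
   halfway point: e.g. xa = 2.5 rounds to xa2 = 2.0 (ties to even),
   dxa = -0.5, and the second mask adds 1 to give 3.0.  The dxa > 0.5
   test is the symmetric guard on the other side.  */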
32343 /* res = copysign (xa2, operand1) */
32344 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
32346 emit_label (label);
32347 LABEL_NUSES (label) = 1;
32349 emit_move_insn (operand0, res);
/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */
32355 ix86_expand_trunc (rtx operand0, rtx operand1)
/* C code for SSE variant we expand below.
	double xa = fabs (x), x2;
	if (!isless (xa, TWO52))
	  return x;
	x2 = (double)(long)x;
	if (HONOR_SIGNED_ZEROS (mode))
	  return copysign (x2, x);
	return x2;
 */
32366 enum machine_mode mode = GET_MODE (operand0);
32367 rtx xa, xi, TWO52, label, res, mask;
32369 TWO52 = ix86_gen_TWO52 (mode);
32371 /* Temporary for holding the result, initialized to the input
32372 operand to ease control flow. */
32373 res = gen_reg_rtx (mode);
32374 emit_move_insn (res, operand1);
32376 /* xa = abs (operand1) */
32377 xa = ix86_expand_sse_fabs (res, &mask);
32379 /* if (!isless (xa, TWO52)) goto label; */
32380 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32382 /* x = (double)(long)x */
32383 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32384 expand_fix (xi, res, 0);
32385 expand_float (res, xi, 0);
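/* Illustrative note: this fix/float round trip truncates toward zero;
   e.g. x = -1.7 converts to the integer -1 and back to -1.0, which is
   exactly trunc (-1.7).  The TWO52 guard above ensures xa fits in the
   chosen integer mode, so the conversion cannot overflow.  */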
32387 if (HONOR_SIGNED_ZEROS (mode))
32388 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
32390 emit_label (label);
32391 LABEL_NUSES (label) = 1;
32393 emit_move_insn (operand0, res);
/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */
32399 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
32401 enum machine_mode mode = GET_MODE (operand0);
32402 rtx xa, mask, TWO52, label, one, res, smask, tmp;
/* C code for SSE variant we expand below.
	double xa = fabs (x), x2;
	if (!isless (xa, TWO52))
	  return x;
	xa2 = xa + TWO52 - TWO52;
   Compensate:
	if (xa2 > xa)
	  xa2 -= 1.0;
	x2 = copysign (xa2, x);
	return x2;
 */
32416 TWO52 = ix86_gen_TWO52 (mode);
32418 /* Temporary for holding the result, initialized to the input
32419 operand to ease control flow. */
32420 res = gen_reg_rtx (mode);
32421 emit_move_insn (res, operand1);
32423 /* xa = abs (operand1) */
32424 xa = ix86_expand_sse_fabs (res, &smask);
32426 /* if (!isless (xa, TWO52)) goto label; */
32427 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32429 /* res = xa + TWO52 - TWO52; */
32430 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
32431 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
32432 emit_move_insn (res, tmp);
32435 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
32437 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
32438 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
32439 emit_insn (gen_rtx_SET (VOIDmode, mask,
32440 gen_rtx_AND (mode, mask, one)));
32441 tmp = expand_simple_binop (mode, MINUS,
32442 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
32443 emit_move_insn (res, tmp);
32445 /* res = copysign (res, operand1) */
32446 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
32448 emit_label (label);
32449 LABEL_NUSES (label) = 1;
32451 emit_move_insn (operand0, res);
/* Expand SSE sequence for computing round from OPERAND1 storing
   into OPERAND0.  */
32457 ix86_expand_round (rtx operand0, rtx operand1)
/* C code for the stuff we're doing below:
	double xa = fabs (x);
	if (!isless (xa, TWO52))
	  return x;
	xa = (double)(long)(xa + nextafter (0.5, 0.0));
	return copysign (xa, x);
 */
32466 enum machine_mode mode = GET_MODE (operand0);
32467 rtx res, TWO52, xa, label, xi, half, mask;
32468 const struct real_format *fmt;
32469 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
32471 /* Temporary for holding the result, initialized to the input
32472 operand to ease control flow. */
32473 res = gen_reg_rtx (mode);
32474 emit_move_insn (res, operand1);
32476 TWO52 = ix86_gen_TWO52 (mode);
32477 xa = ix86_expand_sse_fabs (res, &mask);
32478 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
32480 /* load nextafter (0.5, 0.0) */
32481 fmt = REAL_MODE_FORMAT (mode);
32482 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
32483 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
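/* Worked example (illustrative) of why nextafter (0.5, 0.0) is loaded
   instead of 0.5: for the largest double below 0.5, x = 0.5 - 2**-54,
   the sum x + 0.5 rounds up to exactly 1.0, and truncating would give
   1.0 instead of the correct round (x) = 0.0.  With
   pred_half = 0.5 - 2**-54 the sum is 1 - 2**-53, which stays below
   1.0 and truncates to 0.0.  For DFmode, fmt->p is 53, so
   half_minus_pred_half above is 2**-54.  */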
32485 /* xa = xa + 0.5 */
32486 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
32487 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
32489 /* xa = (double)(int64_t)xa */
32490 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
32491 expand_fix (xi, xa, 0);
32492 expand_float (xa, xi, 0);
32494 /* res = copysign (xa, operand1) */
32495 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
32497 emit_label (label);
32498 LABEL_NUSES (label) = 1;
32500 emit_move_insn (operand0, res);
32504 /* Table of valid machine attributes. */
32505 static const struct attribute_spec ix86_attribute_table[] =
32507 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
32508 affects_type_identity } */
32509 /* Stdcall attribute says callee is responsible for popping arguments
32510 if they are not variable. */
32511 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32513 /* Fastcall attribute says callee is responsible for popping arguments
32514 if they are not variable. */
32515 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32517 /* Thiscall attribute says callee is responsible for popping arguments
32518 if they are not variable. */
32519 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32521 /* Cdecl attribute says the callee is a normal C declaration */
32522 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32524 /* Regparm attribute specifies how many integer arguments are to be
32525 passed in registers. */
32526 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute,
32528 /* Sseregparm attribute says we are using x86_64 calling conventions
32529 for FP arguments. */
32530 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute,
32532 /* force_align_arg_pointer says this function realigns the stack at entry. */
32533 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
32534 false, true, true, ix86_handle_cconv_attribute, false },
32535 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
32536 { "dllimport", 0, 0, false, false, false, handle_dll_attribute, false },
32537 { "dllexport", 0, 0, false, false, false, handle_dll_attribute, false },
32538 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute,
32541 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
32543 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute,
32545 #ifdef SUBTARGET_ATTRIBUTE_TABLE
32546 SUBTARGET_ATTRIBUTE_TABLE,
32548 /* ms_abi and sysv_abi calling convention function attributes. */
32549 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
32550 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute, true },
32551 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute,
32553 { "callee_pop_aggregate_return", 1, 1, false, true, true,
32554 ix86_handle_callee_pop_aggregate_return, true },
32556 { NULL, 0, 0, false, false, false, NULL, false }
32559 /* Implement targetm.vectorize.builtin_vectorization_cost. */
32561 ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
32562 tree vectype ATTRIBUTE_UNUSED,
32563 int misalign ATTRIBUTE_UNUSED)
  switch (type_of_cost)
    {
      case scalar_stmt:
        return ix86_cost->scalar_stmt_cost;

      case scalar_load:
        return ix86_cost->scalar_load_cost;

      case scalar_store:
        return ix86_cost->scalar_store_cost;

      case vector_stmt:
        return ix86_cost->vec_stmt_cost;

      case vector_load:
        return ix86_cost->vec_align_load_cost;

      case vector_store:
        return ix86_cost->vec_store_cost;
32585 case vec_to_scalar:
32586 return ix86_cost->vec_to_scalar_cost;
32588 case scalar_to_vec:
32589 return ix86_cost->scalar_to_vec_cost;
32591 case unaligned_load:
32592 case unaligned_store:
32593 return ix86_cost->vec_unalign_load_cost;
32595 case cond_branch_taken:
32596 return ix86_cost->cond_taken_branch_cost;
32598 case cond_branch_not_taken:
32599 return ix86_cost->cond_not_taken_branch_cost;
      default:
        gcc_unreachable ();
    }
}
32610 /* Implement targetm.vectorize.builtin_vec_perm. */
32613 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
32615 tree itype = TREE_TYPE (vec_type);
32616 bool u = TYPE_UNSIGNED (itype);
32617 enum machine_mode vmode = TYPE_MODE (vec_type);
32618 enum ix86_builtins fcode;
32619 bool ok = TARGET_SSE2;
32625 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
32628 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
32630 itype = ix86_get_builtin_type (IX86_BT_DI);
32635 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
32639 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
32641 itype = ix86_get_builtin_type (IX86_BT_SI);
32645 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
32648 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
32651 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
32654 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
32664 *mask_type = itype;
32665 return ix86_builtins[(int) fcode];
32668 /* Return a vector mode with twice as many elements as VMODE. */
32669 /* ??? Consider moving this to a table generated by genmodes.c. */
32671 static enum machine_mode
doublesize_vector_mode (enum machine_mode vmode)
{
  switch (vmode)
    {
    case V2SFmode: return V4SFmode;
32677 case V1DImode: return V2DImode;
32678 case V2SImode: return V4SImode;
32679 case V4HImode: return V8HImode;
32680 case V8QImode: return V16QImode;
32682 case V2DFmode: return V4DFmode;
32683 case V4SFmode: return V8SFmode;
32684 case V2DImode: return V4DImode;
32685 case V4SImode: return V8SImode;
32686 case V8HImode: return V16HImode;
32687 case V16QImode: return V32QImode;
32689 case V4DFmode: return V8DFmode;
32690 case V8SFmode: return V16SFmode;
32691 case V4DImode: return V8DImode;
32692 case V8SImode: return V16SImode;
32693 case V16HImode: return V32HImode;
32694 case V32QImode: return V64QImode;
    default:
      gcc_unreachable ();
    }
}
32701 /* Construct (set target (vec_select op0 (parallel perm))) and
32702 return true if that's a valid instruction in the active ISA. */
32705 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
32707 rtx rperm[MAX_VECT_LEN], x;
32710 for (i = 0; i < nelt; ++i)
32711 rperm[i] = GEN_INT (perm[i]);
32713 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
32714 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
32715 x = gen_rtx_SET (VOIDmode, target, x);
32718 if (recog_memoized (x) < 0)
32726 /* Similar, but generate a vec_concat from op0 and op1 as well. */
32729 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
32730 const unsigned char *perm, unsigned nelt)
32732 enum machine_mode v2mode;
32735 v2mode = doublesize_vector_mode (GET_MODE (op0));
32736 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
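  /* Illustrative example: for V4SF with op0 = { a b c d },
     op1 = { e f g h } and perm = { 0 4 1 5 }, this emits

       (set target (vec_select:V4SF (vec_concat:V8SF op0 op1)
                     (parallel [0 4 1 5])))

     which recog recognizes as unpcklps, producing { a e b f }.  */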
32737 return expand_vselect (target, x, perm, nelt);
32740 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
32741 in terms of blendp[sd] / pblendw / pblendvb. */
32744 expand_vec_perm_blend (struct expand_vec_perm_d *d)
32746 enum machine_mode vmode = d->vmode;
32747 unsigned i, mask, nelt = d->nelt;
32748 rtx target, op0, op1, x;
32750 if (!TARGET_SSE4_1 || d->op0 == d->op1)
32752 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
32755 /* This is a blend, not a permute. Elements must stay in their
32756 respective lanes. */
32757 for (i = 0; i < nelt; ++i)
32759 unsigned e = d->perm[i];
32760 if (!(e == i || e == i + nelt))
32767 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
32768 decision should be extracted elsewhere, so that we only try that
32769 sequence once all budget==3 options have been tried. */
32771 /* For bytes, see if bytes move in pairs so we can use pblendw with
32772 an immediate argument, rather than pblendvb with a vector argument. */
32773 if (vmode == V16QImode)
32775 bool pblendw_ok = true;
32776 for (i = 0; i < 16 && pblendw_ok; i += 2)
32777 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
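      /* Illustrative example: the byte permutation
	   { 0 1 18 19 4 5 22 23 8 9 26 27 12 13 30 31 }
	 moves bytes in adjacent even/odd pairs, so it is really the word
	 blend { 0 9 2 11 4 13 6 15 } and can use pblendw with the
	 immediate mask 0xaa instead of pblendvb with a mask vector.  */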
32781 rtx rperm[16], vperm;
32783 for (i = 0; i < nelt; ++i)
32784 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
32786 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
32787 vperm = force_reg (V16QImode, vperm);
32789 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
32794 target = d->target;
32806 for (i = 0; i < nelt; ++i)
32807 mask |= (d->perm[i] >= nelt) << i;
32811 for (i = 0; i < 2; ++i)
32812 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
32816 for (i = 0; i < 4; ++i)
32817 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
32821 for (i = 0; i < 8; ++i)
32822 mask |= (d->perm[i * 2] >= 16) << i;
32826 target = gen_lowpart (vmode, target);
32827 op0 = gen_lowpart (vmode, op0);
32828 op1 = gen_lowpart (vmode, op1);
32832 gcc_unreachable ();
32835 /* This matches five different patterns with the different modes. */
32836 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
32837 x = gen_rtx_SET (VOIDmode, target, x);
32843 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
32844 in terms of the variable form of vpermilps.
32846 Note that we will have already failed the immediate input vpermilps,
32847 which requires that the high and low part shuffle be identical; the
32848 variable form doesn't require that. */
32851 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
32853 rtx rperm[8], vperm;
32856 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
32859 /* We can only permute within the 128-bit lane. */
32860 for (i = 0; i < 8; ++i)
32862 unsigned e = d->perm[i];
32863 if (i < 4 ? e >= 4 : e < 4)
32870 for (i = 0; i < 8; ++i)
32872 unsigned e = d->perm[i];
32874 /* Within each 128-bit lane, the elements of op0 are numbered
32875 from 0 and the elements of op1 are numbered from 4. */
32881 rperm[i] = GEN_INT (e);
32884 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
32885 vperm = force_reg (V8SImode, vperm);
32886 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
32891 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
32892 in terms of pshufb or vpperm. */
32895 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
32897 unsigned i, nelt, eltsz;
32898 rtx rperm[16], vperm, target, op0, op1;
32900 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
32902 if (GET_MODE_SIZE (d->vmode) != 16)
32909 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
32911 for (i = 0; i < nelt; ++i)
32913 unsigned j, e = d->perm[i];
32914 for (j = 0; j < eltsz; ++j)
32915 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
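  /* Illustrative example: for V4SI (eltsz == 4) and the element
     permutation { 2 0 3 1 }, this loop builds the byte selector
       { 8 9 10 11  0 1 2 3  12 13 14 15  4 5 6 7 }
     since element e occupies bytes e*4 .. e*4+3.  */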
32918 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
32919 vperm = force_reg (V16QImode, vperm);
32921 target = gen_lowpart (V16QImode, d->target);
32922 op0 = gen_lowpart (V16QImode, d->op0);
32923 if (d->op0 == d->op1)
32924 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
32927 op1 = gen_lowpart (V16QImode, d->op1);
32928 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
32934 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
32935 in a single instruction. */
32938 expand_vec_perm_1 (struct expand_vec_perm_d *d)
32940 unsigned i, nelt = d->nelt;
32941 unsigned char perm2[MAX_VECT_LEN];
32943 /* Check plain VEC_SELECT first, because AVX has instructions that could
32944 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
32945 input where SEL+CONCAT may not. */
32946 if (d->op0 == d->op1)
32948 int mask = nelt - 1;
32950 for (i = 0; i < nelt; i++)
32951 perm2[i] = d->perm[i] & mask;
32953 if (expand_vselect (d->target, d->op0, perm2, nelt))
32956 /* There are plenty of patterns in sse.md that are written for
32957 SEL+CONCAT and are not replicated for a single op. Perhaps
32958 that should be changed, to avoid the nastiness here. */
32960 /* Recognize interleave style patterns, which means incrementing
32961 every other permutation operand. */
32962 for (i = 0; i < nelt; i += 2)
32964 perm2[i] = d->perm[i] & mask;
32965 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
32967 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
32970 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
32973 for (i = 0; i < nelt; i += 4)
32975 perm2[i + 0] = d->perm[i + 0] & mask;
32976 perm2[i + 1] = d->perm[i + 1] & mask;
32977 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
32978 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
32981 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
32986 /* Finally, try the fully general two operand permute. */
32987 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
32990 /* Recognize interleave style patterns with reversed operands. */
32991 if (d->op0 != d->op1)
32993 for (i = 0; i < nelt; ++i)
32995 unsigned e = d->perm[i];
33003 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
33007 /* Try the SSE4.1 blend variable merge instructions. */
33008 if (expand_vec_perm_blend (d))
33011 /* Try one of the AVX vpermil variable permutations. */
33012 if (expand_vec_perm_vpermil (d))
33015 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
33016 if (expand_vec_perm_pshufb (d))
33022 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
33023 in terms of a pair of pshuflw + pshufhw instructions. */
33026 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
33028 unsigned char perm2[MAX_VECT_LEN];
33032 if (d->vmode != V8HImode || d->op0 != d->op1)
33035 /* The two permutations only operate in 64-bit lanes. */
33036 for (i = 0; i < 4; ++i)
33037 if (d->perm[i] >= 4)
33039 for (i = 4; i < 8; ++i)
33040 if (d->perm[i] < 4)
33046 /* Emit the pshuflw. */
33047 memcpy (perm2, d->perm, 4);
33048 for (i = 4; i < 8; ++i)
33050 ok = expand_vselect (d->target, d->op0, perm2, 8);
33053 /* Emit the pshufhw. */
33054 memcpy (perm2 + 4, d->perm + 4, 4);
33055 for (i = 0; i < 4; ++i)
33057 ok = expand_vselect (d->target, d->target, perm2, 8);
33063 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
33064 the permutation using the SSSE3 palignr instruction. This succeeds
33065 when all of the elements in PERM fit within one vector and we merely
33066 need to shift them down so that a single vector permutation has a
33067 chance to succeed. */
33070 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
33072 unsigned i, nelt = d->nelt;
33077 /* Even with AVX, palignr only operates on 128-bit vectors. */
33078 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
33081 min = nelt, max = 0;
33082 for (i = 0; i < nelt; ++i)
33084 unsigned e = d->perm[i];
33090 if (min == 0 || max - min >= nelt)
33093 /* Given that we have SSSE3, we know we'll be able to implement the
33094 single operand permutation after the palignr with pshufb. */
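  /* Illustrative example: for V8HI with perm = { 3 4 5 6 7 8 9 10 },
     min == 3 and max == 10, so max - min < nelt and all elements fit
     in one vector once the op1:op0 concatenation is shifted right by
     3 * 16 bits.  The permutation is then rebased to
     { 0 1 2 3 4 5 6 7 }, which is handled below (in this case it is
     even the degenerate identity).  */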
33098 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
33099 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
33100 gen_lowpart (TImode, d->op1),
33101 gen_lowpart (TImode, d->op0), shift));
33103 d->op0 = d->op1 = d->target;
33106 for (i = 0; i < nelt; ++i)
33108 unsigned e = d->perm[i] - min;
33114 /* Test for the degenerate case where the alignment by itself
33115 produces the desired permutation. */
33119 ok = expand_vec_perm_1 (d);
33125 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
33126 a two vector permutation into a single vector permutation by using
33127 an interleave operation to merge the vectors. */
33130 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
33132 struct expand_vec_perm_d dremap, dfinal;
33133 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
33134 unsigned contents, h1, h2, h3, h4;
33135 unsigned char remap[2 * MAX_VECT_LEN];
33139 if (d->op0 == d->op1)
33142 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
33143 lanes. We can use similar techniques with the vperm2f128 instruction,
33144 but it requires slightly different logic. */
33145 if (GET_MODE_SIZE (d->vmode) != 16)
/* Examine whence the elements come.  */
33150 for (i = 0; i < nelt; ++i)
33151 contents |= 1u << d->perm[i];
33153 /* Split the two input vectors into 4 halves. */
33154 h1 = (1u << nelt2) - 1;
33159 memset (remap, 0xff, sizeof (remap));
/* If the elements are all from the low halves, use interleave low; similarly
   for interleave high.  If the elements are from mismatched halves, we
   can use shufps for V4SF/V4SI or do a DImode shuffle.  */
33165 if ((contents & (h1 | h3)) == contents)
33167 for (i = 0; i < nelt2; ++i)
33170 remap[i + nelt] = i * 2 + 1;
33171 dremap.perm[i * 2] = i;
33172 dremap.perm[i * 2 + 1] = i + nelt;
33175 else if ((contents & (h2 | h4)) == contents)
33177 for (i = 0; i < nelt2; ++i)
33179 remap[i + nelt2] = i * 2;
33180 remap[i + nelt + nelt2] = i * 2 + 1;
33181 dremap.perm[i * 2] = i + nelt2;
33182 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
33185 else if ((contents & (h1 | h4)) == contents)
33187 for (i = 0; i < nelt2; ++i)
33190 remap[i + nelt + nelt2] = i + nelt2;
33191 dremap.perm[i] = i;
33192 dremap.perm[i + nelt2] = i + nelt + nelt2;
33196 dremap.vmode = V2DImode;
33198 dremap.perm[0] = 0;
33199 dremap.perm[1] = 3;
33202 else if ((contents & (h2 | h3)) == contents)
33204 for (i = 0; i < nelt2; ++i)
33206 remap[i + nelt2] = i;
33207 remap[i + nelt] = i + nelt2;
33208 dremap.perm[i] = i + nelt2;
33209 dremap.perm[i + nelt2] = i + nelt;
33213 dremap.vmode = V2DImode;
33215 dremap.perm[0] = 1;
33216 dremap.perm[1] = 2;
33222 /* Use the remapping array set up above to move the elements from their
33223 swizzled locations into their final destinations. */
33225 for (i = 0; i < nelt; ++i)
33227 unsigned e = remap[d->perm[i]];
33228 gcc_assert (e < nelt);
33229 dfinal.perm[i] = e;
33231 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
33232 dfinal.op1 = dfinal.op0;
33233 dremap.target = dfinal.op0;
33235 /* Test if the final remap can be done with a single insn. For V4SFmode or
33236 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
33238 ok = expand_vec_perm_1 (&dfinal);
33239 seq = get_insns ();
33245 if (dremap.vmode != dfinal.vmode)
33247 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
33248 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
33249 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
33252 ok = expand_vec_perm_1 (&dremap);
/* A subroutine of expand_vec_perm_even_odd_1.  Implement the double-word
   permutation with two pshufb insns and an ior.  We should have already
   failed all two-instruction sequences.  */
33264 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
33266 rtx rperm[2][16], vperm, l, h, op, m128;
33267 unsigned int i, nelt, eltsz;
33269 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
33271 gcc_assert (d->op0 != d->op1);
33274 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
33276 /* Generate two permutation masks. If the required element is within
33277 the given vector it is shuffled into the proper lane. If the required
33278 element is in the other vector, force a zero into the lane by setting
33279 bit 7 in the permutation mask. */
33280 m128 = GEN_INT (-128);
33281 for (i = 0; i < nelt; ++i)
33283 unsigned j, e = d->perm[i];
33284 unsigned which = (e >= nelt);
33288 for (j = 0; j < eltsz; ++j)
33290 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
33291 rperm[1-which][i*eltsz + j] = m128;
33295 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
33296 vperm = force_reg (V16QImode, vperm);
33298 l = gen_reg_rtx (V16QImode);
33299 op = gen_lowpart (V16QImode, d->op0);
33300 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
33302 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
33303 vperm = force_reg (V16QImode, vperm);
33305 h = gen_reg_rtx (V16QImode);
33306 op = gen_lowpart (V16QImode, d->op1);
33307 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
33309 op = gen_lowpart (V16QImode, d->target);
33310 emit_insn (gen_iorv16qi3 (op, l, h));
33315 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
33316 and extract-odd permutations. */
33319 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
33326 t1 = gen_reg_rtx (V4DFmode);
33327 t2 = gen_reg_rtx (V4DFmode);
33329 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
33330 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
33331 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
33333 /* Now an unpck[lh]pd will produce the result required. */
33335 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
33337 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
33343 int mask = odd ? 0xdd : 0x88;
33345 t1 = gen_reg_rtx (V8SFmode);
33346 t2 = gen_reg_rtx (V8SFmode);
33347 t3 = gen_reg_rtx (V8SFmode);
33349 /* Shuffle within the 128-bit lanes to produce:
33350 { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }. */
33351 emit_insn (gen_avx_shufps256 (t1, d->op0, d->op1,
33354 /* Shuffle the lanes around to produce:
33355 { 4 6 c e 0 2 8 a } and { 5 7 d f 1 3 9 b }. */
33356 emit_insn (gen_avx_vperm2f128v8sf3 (t2, t1, t1,
33359 /* Shuffle within the 128-bit lanes to produce:
33360 { 0 2 4 6 4 6 0 2 } | { 1 3 5 7 5 7 1 3 }. */
33361 emit_insn (gen_avx_shufps256 (t3, t1, t2, GEN_INT (0x44)));
33363 /* Shuffle within the 128-bit lanes to produce:
33364 { 8 a c e c e 8 a } | { 9 b d f d f 9 b }. */
33365 emit_insn (gen_avx_shufps256 (t2, t1, t2, GEN_INT (0xee)));
33367 /* Shuffle the lanes around to produce:
33368 { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }. */
33369 emit_insn (gen_avx_vperm2f128v8sf3 (d->target, t3, t2,
33378 /* These are always directly implementable by expand_vec_perm_1. */
33379 gcc_unreachable ();
33383 return expand_vec_perm_pshufb2 (d);
33386 /* We need 2*log2(N)-1 operations to achieve odd/even
33387 with interleave. */
33388 t1 = gen_reg_rtx (V8HImode);
33389 t2 = gen_reg_rtx (V8HImode);
33390 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
33391 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
33392 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
33393 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
33395 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
33397 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
33404 return expand_vec_perm_pshufb2 (d);
33407 t1 = gen_reg_rtx (V16QImode);
33408 t2 = gen_reg_rtx (V16QImode);
33409 t3 = gen_reg_rtx (V16QImode);
33410 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
33411 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
33412 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
33413 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
33414 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
33415 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
33417 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
33419 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
33425 gcc_unreachable ();
33431 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33432 extract-even and extract-odd permutations. */
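/* For example, with nelt == 4 the extract-even permutation is
   { 0 2 4 6 } (odd == 0) and extract-odd is { 1 3 5 7 } (odd == 1),
   taking alternating elements across both input operands.  */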
33435 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
33437 unsigned i, odd, nelt = d->nelt;
33440 if (odd != 0 && odd != 1)
33443 for (i = 1; i < nelt; ++i)
33444 if (d->perm[i] != 2 * i + odd)
33447 return expand_vec_perm_even_odd_1 (d, odd);
33450 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
33451 permutations. We assume that expand_vec_perm_1 has already failed. */
33454 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
33456 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
33457 enum machine_mode vmode = d->vmode;
33458 unsigned char perm2[4];
33466 /* These are special-cased in sse.md so that we can optionally
33467 use the vbroadcast instruction. They expand to two insns
33468 if the input happens to be in a register. */
33469 gcc_unreachable ();
33475 /* These are always implementable using standard shuffle patterns. */
33476 gcc_unreachable ();
33480 /* These can be implemented via interleave. We save one insn by
33481 stopping once we have promoted to V4SImode and then use pshufd. */
33484 optab otab = vec_interleave_low_optab;
33488 otab = vec_interleave_high_optab;
33493 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
33494 vmode = get_mode_wider_vector (vmode);
33495 op0 = gen_lowpart (vmode, op0);
33497 while (vmode != V4SImode);
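      /* Illustrative trace for V16QImode with elt == 3: punpcklbw of
	 the vector with itself duplicates every byte, leaving the
	 target byte as HImode element 3; punpcklwd then leaves it as
	 SImode element 3.  The loop stops at V4SImode because a single
	 pshufd { 3 3 3 3 } completes the broadcast, saving one
	 interleave step.  */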
33499 memset (perm2, elt, 4);
33500 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
33505 gcc_unreachable ();
33509 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
33510 broadcast permutations. */
33513 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
33515 unsigned i, elt, nelt = d->nelt;
33517 if (d->op0 != d->op1)
33521 for (i = 1; i < nelt; ++i)
33522 if (d->perm[i] != elt)
33525 return expand_vec_perm_broadcast_1 (d);
33528 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
33529 With all of the interface bits taken care of, perform the expansion
33530 in D and return true on success. */
33533 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
33535 /* Try a single instruction expansion. */
33536 if (expand_vec_perm_1 (d))
33539 /* Try sequences of two instructions. */
33541 if (expand_vec_perm_pshuflw_pshufhw (d))
33544 if (expand_vec_perm_palignr (d))
33547 if (expand_vec_perm_interleave2 (d))
33550 if (expand_vec_perm_broadcast (d))
33553 /* Try sequences of three instructions. */
33555 if (expand_vec_perm_pshufb2 (d))
33558 /* ??? Look for narrow permutations whose element orderings would
33559 allow the promotion to a wider mode. */
33561 /* ??? Look for sequences of interleave or a wider permute that place
33562 the data into the correct lanes for a half-vector shuffle like
33563 pshuf[lh]w or vpermilps. */
33565 /* ??? Look for sequences of interleave that produce the desired results.
33566 The combinatorics of punpck[lh] get pretty ugly... */
33568 if (expand_vec_perm_even_odd (d))
33574 /* Extract the values from the vector CST into the permutation array in D.
33575 Return 0 on error, 1 if all values from the permutation come from the
33576 first vector, 2 if all values from the second vector, and 3 otherwise. */
33579 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
33581 tree list = TREE_VECTOR_CST_ELTS (cst);
33582 unsigned i, nelt = d->nelt;
33585 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
33587 unsigned HOST_WIDE_INT e;
33589 if (!host_integerp (TREE_VALUE (list), 1))
33591 e = tree_low_cst (TREE_VALUE (list), 1);
33595 ret |= (e < nelt ? 1 : 2);
33598 gcc_assert (list == NULL);
33600 /* For all elements from second vector, fold the elements to first. */
33602 for (i = 0; i < nelt; ++i)
33603 d->perm[i] -= nelt;
33609 ix86_expand_vec_perm_builtin (tree exp)
33611 struct expand_vec_perm_d d;
33612 tree arg0, arg1, arg2;
33614 arg0 = CALL_EXPR_ARG (exp, 0);
33615 arg1 = CALL_EXPR_ARG (exp, 1);
33616 arg2 = CALL_EXPR_ARG (exp, 2);
33618 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
33619 d.nelt = GET_MODE_NUNITS (d.vmode);
33620 d.testing_p = false;
33621 gcc_assert (VECTOR_MODE_P (d.vmode));
33623 if (TREE_CODE (arg2) != VECTOR_CST)
33625 error_at (EXPR_LOCATION (exp),
33626 "vector permutation requires vector constant");
33630 switch (extract_vec_perm_cst (&d, arg2))
33636 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
33640 if (!operand_equal_p (arg0, arg1, 0))
33642 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
33643 d.op0 = force_reg (d.vmode, d.op0);
33644 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
33645 d.op1 = force_reg (d.vmode, d.op1);
      /* The elements of PERM do not suggest that only the first operand
	 is used, but both operands are identical.  Allow easier matching
	 of the permutation by folding the permutation into the single
	 input vector.  */
33654 unsigned i, nelt = d.nelt;
33655 for (i = 0; i < nelt; ++i)
33656 if (d.perm[i] >= nelt)
33662 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
33663 d.op0 = force_reg (d.vmode, d.op0);
33668 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
33669 d.op0 = force_reg (d.vmode, d.op0);
33674 d.target = gen_reg_rtx (d.vmode);
33675 if (ix86_expand_vec_perm_builtin_1 (&d))
  /* For compiler-generated permutations, we should never get here, because
     the compiler should also be checking the ok hook.  But since this is a
     builtin the user has access to, don't abort.  */
33684 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
33687 sorry ("vector permutation (%d %d %d %d)",
33688 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
33691 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
33692 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
33693 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
33696 sorry ("vector permutation "
33697 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
33698 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
33699 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
33700 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
33701 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
33704 gcc_unreachable ();
33707 return CONST0_RTX (d.vmode);
33710 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
33713 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
33715 struct expand_vec_perm_d d;
33719 d.vmode = TYPE_MODE (vec_type);
33720 d.nelt = GET_MODE_NUNITS (d.vmode);
33721 d.testing_p = true;
33723 /* Given sufficient ISA support we can just return true here
33724 for selected vector modes. */
33725 if (GET_MODE_SIZE (d.vmode) == 16)
33727 /* All implementable with a single vpperm insn. */
33730 /* All implementable with 2 pshufb + 1 ior. */
33733 /* All implementable with shufpd or unpck[lh]pd. */
33738 vec_mask = extract_vec_perm_cst (&d, mask);
  /* This hook cannot be called in response to something that the
     user does (unlike the builtin expander), so we shouldn't ever see
     an error generated from the extract.  */
33743 gcc_assert (vec_mask > 0 && vec_mask <= 3);
33744 one_vec = (vec_mask != 3);
33746 /* Implementable with shufps or pshufd. */
33747 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
33750 /* Otherwise we have to go through the motions and see if we can
33751 figure out how to generate the requested permutation. */
33752 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
33753 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
33755 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
33758 ret = ix86_expand_vec_perm_builtin_1 (&d);
33765 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
33767 struct expand_vec_perm_d d;
33773 d.vmode = GET_MODE (targ);
33774 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
33775 d.testing_p = false;
33777 for (i = 0; i < nelt; ++i)
33778 d.perm[i] = i * 2 + odd;
33780 /* We'll either be able to implement the permutation directly... */
33781 if (expand_vec_perm_1 (&d))
33784 /* ... or we use the special-case patterns. */
33785 expand_vec_perm_even_odd_1 (&d, odd);
33788 /* Expand an insert into a vector register through pinsr insn.
33789 Return true if successful. */
33792 ix86_expand_pinsr (rtx *operands)
33794 rtx dst = operands[0];
33795 rtx src = operands[3];
33797 unsigned int size = INTVAL (operands[1]);
33798 unsigned int pos = INTVAL (operands[2]);
33800 if (GET_CODE (dst) == SUBREG)
33802 pos += SUBREG_BYTE (dst) * BITS_PER_UNIT;
33803 dst = SUBREG_REG (dst);
33806 if (GET_CODE (src) == SUBREG)
33807 src = SUBREG_REG (src);
33809 switch (GET_MODE (dst))
33816 enum machine_mode srcmode, dstmode;
33817 rtx (*pinsr)(rtx, rtx, rtx, rtx);
33819 srcmode = mode_for_size (size, MODE_INT, 0);
33824 if (!TARGET_SSE4_1)
33826 dstmode = V16QImode;
33827 pinsr = gen_sse4_1_pinsrb;
33833 dstmode = V8HImode;
33834 pinsr = gen_sse2_pinsrw;
33838 if (!TARGET_SSE4_1)
33840 dstmode = V4SImode;
33841 pinsr = gen_sse4_1_pinsrd;
33845 gcc_assert (TARGET_64BIT);
33846 if (!TARGET_SSE4_1)
33848 dstmode = V2DImode;
33849 pinsr = gen_sse4_1_pinsrq;
      dst = gen_lowpart (dstmode, dst);
      src = gen_lowpart (srcmode, src);

      /* Convert the bit position of the insert into an element index
	 for the pinsr immediate.  */
      pos /= size;

      emit_insn (pinsr (dst, dst, src, GEN_INT (1 << pos)));
/* This function returns the calling-ABI-specific va_list type node.
   It returns the FNDECL-specific va_list type.  */
33874 ix86_fn_abi_va_list (tree fndecl)
33877 return va_list_type_node;
33878 gcc_assert (fndecl != NULL_TREE);
33880 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
33881 return ms_va_list_type_node;
33883 return sysv_va_list_type_node;
/* Returns the canonical va_list type specified by TYPE.  If there
   is no valid TYPE provided, it returns NULL_TREE.  */
33890 ix86_canonical_va_list_type (tree type)
33894 /* Resolve references and pointers to va_list type. */
33895 if (TREE_CODE (type) == MEM_REF)
33896 type = TREE_TYPE (type);
33897 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
33898 type = TREE_TYPE (type);
33899 else if (POINTER_TYPE_P (type) && TREE_CODE (TREE_TYPE (type)) == ARRAY_TYPE)
33900 type = TREE_TYPE (type);
33902 if (TARGET_64BIT && va_list_type_node != NULL_TREE)
33904 wtype = va_list_type_node;
33905 gcc_assert (wtype != NULL_TREE);
33907 if (TREE_CODE (wtype) == ARRAY_TYPE)
33909 /* If va_list is an array type, the argument may have decayed
33910 to a pointer type, e.g. by being passed to another function.
33911 In that case, unwrap both types so that we can compare the
33912 underlying records. */
33913 if (TREE_CODE (htype) == ARRAY_TYPE
33914 || POINTER_TYPE_P (htype))
33916 wtype = TREE_TYPE (wtype);
33917 htype = TREE_TYPE (htype);
33920 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
33921 return va_list_type_node;
33922 wtype = sysv_va_list_type_node;
33923 gcc_assert (wtype != NULL_TREE);
33925 if (TREE_CODE (wtype) == ARRAY_TYPE)
33927 /* If va_list is an array type, the argument may have decayed
33928 to a pointer type, e.g. by being passed to another function.
33929 In that case, unwrap both types so that we can compare the
33930 underlying records. */
33931 if (TREE_CODE (htype) == ARRAY_TYPE
33932 || POINTER_TYPE_P (htype))
33934 wtype = TREE_TYPE (wtype);
33935 htype = TREE_TYPE (htype);
33938 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
33939 return sysv_va_list_type_node;
33940 wtype = ms_va_list_type_node;
33941 gcc_assert (wtype != NULL_TREE);
33943 if (TREE_CODE (wtype) == ARRAY_TYPE)
33945 /* If va_list is an array type, the argument may have decayed
33946 to a pointer type, e.g. by being passed to another function.
33947 In that case, unwrap both types so that we can compare the
33948 underlying records. */
33949 if (TREE_CODE (htype) == ARRAY_TYPE
33950 || POINTER_TYPE_P (htype))
33952 wtype = TREE_TYPE (wtype);
33953 htype = TREE_TYPE (htype);
33956 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
33957 return ms_va_list_type_node;
33960 return std_canonical_va_list_type (type);
33963 /* Iterate through the target-specific builtin types for va_list.
33964 IDX denotes the iterator, *PTREE is set to the result type of
33965 the va_list builtin, and *PNAME to its internal type.
33966 Returns zero if there is no element for this index, otherwise
33967 IDX should be increased upon the next call.
33968 Note, do not iterate a base builtin's name like __builtin_va_list.
33969 Used from c_common_nodes_and_builtins. */
33972 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
33982 *ptree = ms_va_list_type_node;
33983 *pname = "__builtin_ms_va_list";
33987 *ptree = sysv_va_list_type_node;
33988 *pname = "__builtin_sysv_va_list";
33996 #undef TARGET_SCHED_DISPATCH
33997 #define TARGET_SCHED_DISPATCH has_dispatch
33998 #undef TARGET_SCHED_DISPATCH_DO
33999 #define TARGET_SCHED_DISPATCH_DO do_dispatch
34001 /* The size of the dispatch window is the total number of bytes of
34002 object code allowed in a window. */
34003 #define DISPATCH_WINDOW_SIZE 16
34005 /* Number of dispatch windows considered for scheduling. */
34006 #define MAX_DISPATCH_WINDOWS 3
34008 /* Maximum number of instructions in a window. */
34011 /* Maximum number of immediate operands in a window. */
34014 /* Maximum number of immediate bits allowed in a window. */
34015 #define MAX_IMM_SIZE 128
34017 /* Maximum number of 32 bit immediates allowed in a window. */
34018 #define MAX_IMM_32 4
34020 /* Maximum number of 64 bit immediates allowed in a window. */
34021 #define MAX_IMM_64 2
34023 /* Maximum total of loads or prefetches allowed in a window. */
34026 /* Maximum total of stores allowed in a window. */
34027 #define MAX_STORE 1
/* Dispatch groups.  Instructions that affect the mix in a dispatch window.  */
34034 enum dispatch_group {
/* Number of allowable groups in a dispatch window.  It is an array
   indexed by the dispatch_group enum.  100 is used as a big number,
   because the number of these kinds of operations does not have any
   effect in a dispatch window, but we need them for other reasons in
   a window.  */
34054 static unsigned int num_allowable_groups[disp_last] = {
34055 0, 2, 1, 1, 2, 4, 4, 2, 1, BIG, BIG
34058 char group_name[disp_last + 1][16] = {
34059 "disp_no_group", "disp_load", "disp_store", "disp_load_store",
34060 "disp_prefetch", "disp_imm", "disp_imm_32", "disp_imm_64",
34061 "disp_branch", "disp_cmp", "disp_jcc", "disp_last"
34064 /* Instruction path. */
34067 path_single, /* Single micro op. */
34068 path_double, /* Double micro op. */
  path_multi,			/* Instructions with more than 2 micro ops.  */
34073 /* sched_insn_info defines a window to the instructions scheduled in
34074 the basic block. It contains a pointer to the insn_info table and
34075 the instruction scheduled.
   Windows are allocated for each basic block and are linked with
   each other.  */
34079 typedef struct sched_insn_info_s {
34081 enum dispatch_group group;
34082 enum insn_path path;
/* Linked list of dispatch windows.  This is a two-way list of
   dispatch windows of a basic block.  It contains information about
   the number of uops in the window and the total number of
   instructions and of bytes in the object code for this dispatch
   window.  */
34092 typedef struct dispatch_windows_s {
34093 int num_insn; /* Number of insn in the window. */
34094 int num_uops; /* Number of uops in the window. */
34095 int window_size; /* Number of bytes in the window. */
  int window_num;		/* Window number, either 0 or 1.  */
34097 int num_imm; /* Number of immediates in an insn. */
34098 int num_imm_32; /* Number of 32 bit immediates in an insn. */
34099 int num_imm_64; /* Number of 64 bit immediates in an insn. */
34100 int imm_size; /* Total immediates in the window. */
34101 int num_loads; /* Total memory loads in the window. */
34102 int num_stores; /* Total memory stores in the window. */
34103 int violation; /* Violation exists in window. */
34104 sched_insn_info *window; /* Pointer to the window. */
34105 struct dispatch_windows_s *next;
34106 struct dispatch_windows_s *prev;
34107 } dispatch_windows;
/* Immediate values used in an insn.  */
34110 typedef struct imm_info_s
34117 static dispatch_windows *dispatch_window_list;
34118 static dispatch_windows *dispatch_window_list1;
34120 /* Get dispatch group of insn. */
34122 static enum dispatch_group
34123 get_mem_group (rtx insn)
34125 enum attr_memory memory;
34127 if (INSN_CODE (insn) < 0)
34128 return disp_no_group;
34129 memory = get_attr_memory (insn);
34130 if (memory == MEMORY_STORE)
34133 if (memory == MEMORY_LOAD)
34136 if (memory == MEMORY_BOTH)
34137 return disp_load_store;
34139 return disp_no_group;
34142 /* Return true if insn is a compare instruction. */
34147 enum attr_type type;
34149 type = get_attr_type (insn);
34150 return (type == TYPE_TEST
34151 || type == TYPE_ICMP
34152 || type == TYPE_FCMP
34153 || GET_CODE (PATTERN (insn)) == COMPARE);
/* Return true if a dispatch violation was encountered.  */
34159 dispatch_violation (void)
34161 if (dispatch_window_list->next)
34162 return dispatch_window_list->next->violation;
34163 return dispatch_window_list->violation;
34166 /* Return true if insn is a branch instruction. */
34169 is_branch (rtx insn)
34171 return (CALL_P (insn) || JUMP_P (insn));
34174 /* Return true if insn is a prefetch instruction. */
34177 is_prefetch (rtx insn)
34179 return NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == PREFETCH;
34182 /* This function initializes a dispatch window and the list container holding a
34183 pointer to the window. */
34186 init_window (int window_num)
34189 dispatch_windows *new_list;
34191 if (window_num == 0)
34192 new_list = dispatch_window_list;
34194 new_list = dispatch_window_list1;
34196 new_list->num_insn = 0;
34197 new_list->num_uops = 0;
34198 new_list->window_size = 0;
34199 new_list->next = NULL;
34200 new_list->prev = NULL;
34201 new_list->window_num = window_num;
34202 new_list->num_imm = 0;
34203 new_list->num_imm_32 = 0;
34204 new_list->num_imm_64 = 0;
34205 new_list->imm_size = 0;
34206 new_list->num_loads = 0;
34207 new_list->num_stores = 0;
34208 new_list->violation = false;
34210 for (i = 0; i < MAX_INSN; i++)
34212 new_list->window[i].insn = NULL;
34213 new_list->window[i].group = disp_no_group;
34214 new_list->window[i].path = no_path;
34215 new_list->window[i].byte_len = 0;
34216 new_list->window[i].imm_bytes = 0;
34221 /* This function allocates and initializes a dispatch window and the
34222 list container holding a pointer to the window. */
34224 static dispatch_windows *
34225 allocate_window (void)
34227 dispatch_windows *new_list = XNEW (struct dispatch_windows_s);
34228 new_list->window = XNEWVEC (struct sched_insn_info_s, MAX_INSN + 1);
34233 /* This routine initializes the dispatch scheduling information. It
34234 initiates building dispatch scheduler tables and constructs the
34235 first dispatch window. */
34238 init_dispatch_sched (void)
34240 /* Allocate a dispatch list and a window. */
34241 dispatch_window_list = allocate_window ();
34242 dispatch_window_list1 = allocate_window ();
/* This function returns true if a branch is detected.  The end of a basic
   block does not have to be a branch, but here we assume only branches end
   a basic block.  */
34252 is_end_basic_block (enum dispatch_group group)
34254 return group == disp_branch;
34257 /* This function is called when the end of a window processing is reached. */
34260 process_end_window (void)
34262 gcc_assert (dispatch_window_list->num_insn <= MAX_INSN);
34263 if (dispatch_window_list->next)
34265 gcc_assert (dispatch_window_list1->num_insn <= MAX_INSN);
34266 gcc_assert (dispatch_window_list->window_size
34267 + dispatch_window_list1->window_size <= 48);
/* Allocates a new dispatch window and adds it to WINDOW_LIST.
   WINDOW_NUM is either 0 or 1.  A maximum of two windows are generated
   for 48 bytes of instructions.  Note that these windows are not dispatch
   windows whose sizes are DISPATCH_WINDOW_SIZE.  */
34278 static dispatch_windows *
34279 allocate_next_window (int window_num)
34281 if (window_num == 0)
34283 if (dispatch_window_list->next)
34286 return dispatch_window_list;
34289 dispatch_window_list->next = dispatch_window_list1;
34290 dispatch_window_list1->prev = dispatch_window_list;
34292 return dispatch_window_list1;
34295 /* Increment the number of immediate operands of an instruction. */
34298 find_constant_1 (rtx *in_rtx, imm_info *imm_values)
34303 switch ( GET_CODE (*in_rtx))
34308 (imm_values->imm)++;
34309 if (x86_64_immediate_operand (*in_rtx, SImode))
34310 (imm_values->imm32)++;
34312 (imm_values->imm64)++;
34316 (imm_values->imm)++;
34317 (imm_values->imm64)++;
34321 if (LABEL_KIND (*in_rtx) == LABEL_NORMAL)
34323 (imm_values->imm)++;
34324 (imm_values->imm32)++;
34335 /* Compute number of immediate operands of an instruction. */
34338 find_constant (rtx in_rtx, imm_info *imm_values)
34340 for_each_rtx (INSN_P (in_rtx) ? &PATTERN (in_rtx) : &in_rtx,
34341 (rtx_function) find_constant_1, (void *) imm_values);
/* Return the total size of the immediate operands of an instruction
   along with the number of corresponding immediate operands.  It
   initializes its parameters to zero before calling FIND_CONSTANT.
   INSN is the input instruction.  IMM is the total of immediates.
   IMM32 is the number of 32-bit immediates.  IMM64 is the number of
   64-bit immediates.  */
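/* Illustrative example: an insn carrying one 32-bit and one 64-bit
   immediate yields *imm = 2, *imm32 = 1, *imm64 = 1 and a returned
   size of 1*4 + 1*8 = 12 bytes.  */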
34352 get_num_immediates (rtx insn, int *imm, int *imm32, int *imm64)
34354 imm_info imm_values = {0, 0, 0};
34356 find_constant (insn, &imm_values);
34357 *imm = imm_values.imm;
34358 *imm32 = imm_values.imm32;
34359 *imm64 = imm_values.imm64;
34360 return imm_values.imm32 * 4 + imm_values.imm64 * 8;
/* This function indicates if an operand of an instruction is an
   immediate.  */
34367 has_immediate (rtx insn)
34369 int num_imm_operand;
34370 int num_imm32_operand;
34371 int num_imm64_operand;
34374 return get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34375 &num_imm64_operand);
34379 /* Return single or double path for instructions. */
34381 static enum insn_path
34382 get_insn_path (rtx insn)
34384 enum attr_amdfam10_decode path = get_attr_amdfam10_decode (insn);
34386 if ((int)path == 0)
34387 return path_single;
  if ((int)path == 1)
    return path_double;

  return path_multi;
}
34395 /* Return insn dispatch group. */
34397 static enum dispatch_group
34398 get_insn_group (rtx insn)
34400 enum dispatch_group group = get_mem_group (insn);
34404 if (is_branch (insn))
34405 return disp_branch;
34410 if (has_immediate (insn))
34413 if (is_prefetch (insn))
34414 return disp_prefetch;
34416 return disp_no_group;
34419 /* Count number of GROUP restricted instructions in a dispatch
34420 window WINDOW_LIST. */
34423 count_num_restricted (rtx insn, dispatch_windows *window_list)
34425 enum dispatch_group group = get_insn_group (insn);
34427 int num_imm_operand;
34428 int num_imm32_operand;
34429 int num_imm64_operand;
34431 if (group == disp_no_group)
34434 if (group == disp_imm)
34436 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34437 &num_imm64_operand);
34438 if (window_list->imm_size + imm_size > MAX_IMM_SIZE
34439 || num_imm_operand + window_list->num_imm > MAX_IMM
34440 || (num_imm32_operand > 0
34441 && (window_list->num_imm_32 + num_imm32_operand > MAX_IMM_32
34442 || window_list->num_imm_64 * 2 + num_imm32_operand > MAX_IMM_32))
34443 || (num_imm64_operand > 0
34444 && (window_list->num_imm_64 + num_imm64_operand > MAX_IMM_64
34445 || window_list->num_imm_32 + num_imm64_operand * 2 > MAX_IMM_32))
34446 || (window_list->imm_size + imm_size == MAX_IMM_SIZE
34447 && num_imm64_operand > 0
34448 && ((window_list->num_imm_64 > 0
34449 && window_list->num_insn >= 2)
34450 || window_list->num_insn >= 3)))
34456 if ((group == disp_load_store
34457 && (window_list->num_loads >= MAX_LOAD
34458 || window_list->num_stores >= MAX_STORE))
34459 || ((group == disp_load
34460 || group == disp_prefetch)
34461 && window_list->num_loads >= MAX_LOAD)
34462 || (group == disp_store
34463 && window_list->num_stores >= MAX_STORE))
34469 /* This function returns true if insn satisfies dispatch rules on the
34470 last window scheduled. */
34473 fits_dispatch_window (rtx insn)
34475 dispatch_windows *window_list = dispatch_window_list;
34476 dispatch_windows *window_list_next = dispatch_window_list->next;
34477 unsigned int num_restrict;
34478 enum dispatch_group group = get_insn_group (insn);
34479 enum insn_path path = get_insn_path (insn);
  /* Make disp_cmp and disp_jcc get scheduled at the latest.  These
     instructions should be given the lowest priority in the
     scheduling process in the Haifa scheduler to make sure they will be
     scheduled in the same dispatch window as the reference to them.  */
34486 if (group == disp_jcc || group == disp_cmp)
34489 /* Check nonrestricted. */
34490 if (group == disp_no_group || group == disp_branch)
34493 /* Get last dispatch window. */
34494 if (window_list_next)
34495 window_list = window_list_next;
34497 if (window_list->window_num == 1)
34499 sum = window_list->prev->window_size + window_list->window_size;
34502 || (min_insn_size (insn) + sum) >= 48)
34503 /* Window 1 is full. Go for next window. */
34507 num_restrict = count_num_restricted (insn, window_list);
34509 if (num_restrict > num_allowable_groups[group])
34512 /* See if it fits in the first window. */
34513 if (window_list->window_num == 0)
      /* The first window should have only single- and double-path uops.  */
34517 if (path == path_double
34518 && (window_list->num_uops + 2) > MAX_INSN)
34520 else if (path != path_single)
34526 /* Add an instruction INSN with NUM_UOPS micro-operations to the
34527 dispatch window WINDOW_LIST. */
34530 add_insn_window (rtx insn, dispatch_windows *window_list, int num_uops)
34532 int byte_len = min_insn_size (insn);
34533 int num_insn = window_list->num_insn;
34535 sched_insn_info *window = window_list->window;
34536 enum dispatch_group group = get_insn_group (insn);
34537 enum insn_path path = get_insn_path (insn);
34538 int num_imm_operand;
34539 int num_imm32_operand;
34540 int num_imm64_operand;
34542 if (!window_list->violation && group != disp_cmp
34543 && !fits_dispatch_window (insn))
34544 window_list->violation = true;
34546 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34547 &num_imm64_operand);
34549 /* Initialize window with new instruction. */
34550 window[num_insn].insn = insn;
34551 window[num_insn].byte_len = byte_len;
34552 window[num_insn].group = group;
34553 window[num_insn].path = path;
34554 window[num_insn].imm_bytes = imm_size;
34556 window_list->window_size += byte_len;
34557 window_list->num_insn = num_insn + 1;
34558 window_list->num_uops += num_uops;
34559 window_list->imm_size += imm_size;
34560 window_list->num_imm += num_imm_operand;
34561 window_list->num_imm_32 += num_imm32_operand;
34562 window_list->num_imm_64 += num_imm64_operand;
34564 if (group == disp_store)
34565 window_list->num_stores += 1;
34566 else if (group == disp_load
34567 || group == disp_prefetch)
34568 window_list->num_loads += 1;
34569 else if (group == disp_load_store)
34571 window_list->num_stores += 1;
34572 window_list->num_loads += 1;
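/* Note that a disp_load_store instruction (e.g. a read-modify-write
   memory operation) counts against both the load cap and the store
   cap, so with a store cap of MAX_STORE == 1, as defined earlier in
   this file, a single such instruction saturates the window's store
   capacity on its own.  */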
34576 /* Add a scheduled instruction, INSN, to the current dispatch window.
34577    If the total number of bytes or the number of instructions in the
34578    window would exceed the allowed limits, allocate a new window.  */
34581 add_to_dispatch_window (rtx insn)
34584 dispatch_windows *window_list;
34585 dispatch_windows *next_list;
34586 dispatch_windows *window0_list;
34587 enum insn_path path;
34588 enum dispatch_group insn_group;
34596 if (INSN_CODE (insn) < 0)
34599 byte_len = min_insn_size (insn);
34600 window_list = dispatch_window_list;
34601 next_list = window_list->next;
34602 path = get_insn_path (insn);
34603 insn_group = get_insn_group (insn);
34605 /* Get the last dispatch window. */
34607 window_list = dispatch_window_list->next;
34609 if (path == path_single)
34611 else if (path == path_double)
34614 insn_num_uops = (int) path;
34616 /* If the current window is full, get a new window.
34617 Window number zero is full if MAX_INSN uops are scheduled in it.
34618 Window number one is full if window zero's bytes plus window
34619 one's bytes reach 32, if adding the new instruction's bytes
34620 would push the total past 48, or if it already has MAX_INSN
34621 instructions in it.  */
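/* For example: if window 0 holds 16 bytes and window 1 currently
   holds 12, the sum is 28, so a 5-byte instruction still fits
   (28 + 5 < 48 and the sum is not 32); but once the combined size
   reaches 32 bytes, window 1 is closed regardless of the new
   instruction's length.  */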
34622 num_insn = window_list->num_insn;
34623 num_uops = window_list->num_uops;
34624 window_num = window_list->window_num;
34625 insn_fits = fits_dispatch_window (insn);
34627 if (num_insn >= MAX_INSN
34628 || num_uops + insn_num_uops > MAX_INSN
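/* A new window is needed: ~window_num & 1 toggles between the two
   dispatch windows, 0 -> 1 -> 0.  */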
34631 window_num = ~window_num & 1;
34632 window_list = allocate_next_window (window_num);
34635 if (window_num == 0)
34637 add_insn_window (insn, window_list, insn_num_uops);
34638 if (window_list->num_insn >= MAX_INSN
34639 && insn_group == disp_branch)
34641 process_end_window ();
34645 else if (window_num == 1)
34647 window0_list = window_list->prev;
34648 sum = window0_list->window_size + window_list->window_size;
34650 || (byte_len + sum) >= 48)
34652 process_end_window ();
34653 window_list = dispatch_window_list;
34656 add_insn_window (insn, window_list, insn_num_uops);
34659 gcc_unreachable ();
34661 if (is_end_basic_block (insn_group))
34663 /* The end of the basic block is reached; do the end-of-basic-block processing.  */
34664 process_end_window ();
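/* In short: window 0 fills until MAX_INSN uops are scheduled in it;
   window 1 then fills until the combined size of both windows reaches
   32 bytes or adding the next instruction would push it past 48 bytes;
   process_end_window then retires both windows and the cycle restarts.
   The windows are also flushed at a dispatch-ending group, as tested
   by is_end_basic_block above.  */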
34669 /* Print the dispatch window, WINDOW_NUM, to FILE. */
34671 DEBUG_FUNCTION static void
34672 debug_dispatch_window_file (FILE *file, int window_num)
34674 dispatch_windows *list;
34677 if (window_num == 0)
34678 list = dispatch_window_list;
34680 list = dispatch_window_list1;
34682 fprintf (file, "Window #%d:\n", list->window_num);
34683 fprintf (file, " num_insn = %d, num_uops = %d, window_size = %d\n",
34684 list->num_insn, list->num_uops, list->window_size);
34685 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
34686 list->num_imm, list->num_imm_32, list->num_imm_64, list->imm_size);
34688 fprintf (file, " num_loads = %d, num_stores = %d\n", list->num_loads,
34690 fprintf (file, " insn info:\n");
34692 for (i = 0; i < MAX_INSN; i++)
34694 if (!list->window[i].insn)
34696 fprintf (file, " group[%d] = %s, insn[%d] = %p, path[%d] = %d, byte_len[%d] = %d, imm_bytes[%d] = %d\n",
34697 i, group_name[list->window[i].group],
34698 i, (void *)list->window[i].insn,
34699 i, list->window[i].path,
34700 i, list->window[i].byte_len,
34701 i, list->window[i].imm_bytes);
34705 /* Print dispatch window WINDOW_NUM to stdout.  */
34707 DEBUG_FUNCTION void
34708 debug_dispatch_window (int window_num)
34710 debug_dispatch_window_file (stdout, window_num);
34713 /* Print INSN dispatch information to FILE. */
34715 DEBUG_FUNCTION static void
34716 debug_insn_dispatch_info_file (FILE *file, rtx insn)
34719 enum insn_path path;
34720 enum dispatch_group group;
34722 int num_imm_operand;
34723 int num_imm32_operand;
34724 int num_imm64_operand;
34726 if (INSN_CODE (insn) < 0)
34729 byte_len = min_insn_size (insn);
34730 path = get_insn_path (insn);
34731 group = get_insn_group (insn);
34732 imm_size = get_num_immediates (insn, &num_imm_operand, &num_imm32_operand,
34733 &num_imm64_operand);
34735 fprintf (file, " insn info:\n");
34736 fprintf (file, " group = %s, path = %d, byte_len = %d\n",
34737 group_name[group], path, byte_len);
34738 fprintf (file, " num_imm = %d, num_imm_32 = %d, num_imm_64 = %d, imm_size = %d\n",
34739 num_imm_operand, num_imm32_operand, num_imm64_operand, imm_size);
34742 /* Print to stdout the status of the ready list with respect to
34743    dispatch windows.  */
34745 DEBUG_FUNCTION void
34746 debug_ready_dispatch (void)
34749 int no_ready = number_in_ready ();
34751 fprintf (stdout, "Number of ready: %d\n", no_ready);
34753 for (i = 0; i < no_ready; i++)
34754 debug_insn_dispatch_info_file (stdout, get_ready_element (i));
34757 /* This routine is the driver of the dispatch scheduler. */
34760 do_dispatch (rtx insn, int mode)
34762 if (mode == DISPATCH_INIT)
34763 init_dispatch_sched ();
34764 else if (mode == ADD_TO_DISPATCH_WINDOW)
34765 add_to_dispatch_window (insn);
34768 /* Dispatch-scheduler hook: carry out ACTION for INSN, returning
FALSE when dispatch scheduling is not active.  */
34771 has_dispatch (rtx insn, int action)
34773 if ((ix86_tune == PROCESSOR_BDVER1 || ix86_tune == PROCESSOR_BDVER2)
34774 && flag_dispatch_scheduler)
34780 case IS_DISPATCH_ON:
34785 return is_cmp (insn);
34787 case DISPATCH_VIOLATION:
34788 return dispatch_violation ();
34790 case FITS_DISPATCH_WINDOW:
34791 return fits_dispatch_window (insn);
34797 /* ??? No autovectorization into MMX or 3DNOW until we can reliably
34798 place emms and femms instructions. */
34800 static enum machine_mode
34801 ix86_preferred_simd_mode (enum machine_mode mode)
34818 if (TARGET_AVX && !TARGET_PREFER_AVX128)
34824 if (!TARGET_VECTORIZE_DOUBLE)
34826 else if (TARGET_AVX && !TARGET_PREFER_AVX128)
34828 else if (TARGET_SSE2)
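/* (The elided cases above presumably return the widest supported
   vector mode for the scalar MODE, e.g. V8SFmode under AVX versus
   V4SFmode under SSE for SFmode, and V4DFmode versus V2DFmode for
   DFmode, falling back to word_mode when no vector mode applies.)  */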
34837 /* If AVX is enabled then try vectorizing with both 256bit and 128bit
vectors.  */
34840 static unsigned int
34841 ix86_autovectorize_vector_sizes (void)
34843 return (TARGET_AVX && !TARGET_PREFER_AVX128) ? 32 | 16 : 0;
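/* The return value is a bitmask of vector sizes in bytes: 32 | 16
   advertises both 256-bit and 128-bit vectors to the vectorizer,
   while 0 means only the preferred SIMD mode's size is tried.  */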
34846 /* Initialize the GCC target structure. */
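/* Each hook below is first #undef'd (target-def.h provides the
   defaults) and then redefined to the i386 implementation; the
   TARGET_INITIALIZER at the end of this file collects all of these
   macros into the targetm vector.  */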
34847 #undef TARGET_RETURN_IN_MEMORY
34848 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
34850 #undef TARGET_LEGITIMIZE_ADDRESS
34851 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
34853 #undef TARGET_ATTRIBUTE_TABLE
34854 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
34855 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
34856 # undef TARGET_MERGE_DECL_ATTRIBUTES
34857 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
34860 #undef TARGET_COMP_TYPE_ATTRIBUTES
34861 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
34863 #undef TARGET_INIT_BUILTINS
34864 #define TARGET_INIT_BUILTINS ix86_init_builtins
34865 #undef TARGET_BUILTIN_DECL
34866 #define TARGET_BUILTIN_DECL ix86_builtin_decl
34867 #undef TARGET_EXPAND_BUILTIN
34868 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
34870 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
34871 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
34872 ix86_builtin_vectorized_function
34874 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
34875 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
34877 #undef TARGET_BUILTIN_RECIPROCAL
34878 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
34880 #undef TARGET_ASM_FUNCTION_EPILOGUE
34881 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
34883 #undef TARGET_ENCODE_SECTION_INFO
34884 #ifndef SUBTARGET_ENCODE_SECTION_INFO
34885 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
34887 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
34890 #undef TARGET_ASM_OPEN_PAREN
34891 #define TARGET_ASM_OPEN_PAREN ""
34892 #undef TARGET_ASM_CLOSE_PAREN
34893 #define TARGET_ASM_CLOSE_PAREN ""
34895 #undef TARGET_ASM_BYTE_OP
34896 #define TARGET_ASM_BYTE_OP ASM_BYTE
34898 #undef TARGET_ASM_ALIGNED_HI_OP
34899 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
34900 #undef TARGET_ASM_ALIGNED_SI_OP
34901 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
34903 #undef TARGET_ASM_ALIGNED_DI_OP
34904 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
34907 #undef TARGET_PROFILE_BEFORE_PROLOGUE
34908 #define TARGET_PROFILE_BEFORE_PROLOGUE ix86_profile_before_prologue
34910 #undef TARGET_ASM_UNALIGNED_HI_OP
34911 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
34912 #undef TARGET_ASM_UNALIGNED_SI_OP
34913 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
34914 #undef TARGET_ASM_UNALIGNED_DI_OP
34915 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
34917 #undef TARGET_PRINT_OPERAND
34918 #define TARGET_PRINT_OPERAND ix86_print_operand
34919 #undef TARGET_PRINT_OPERAND_ADDRESS
34920 #define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
34921 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
34922 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
34923 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
34924 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA i386_asm_output_addr_const_extra
34926 #undef TARGET_SCHED_INIT_GLOBAL
34927 #define TARGET_SCHED_INIT_GLOBAL ix86_sched_init_global
34928 #undef TARGET_SCHED_ADJUST_COST
34929 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
34930 #undef TARGET_SCHED_ISSUE_RATE
34931 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
34932 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
34933 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
34934 ia32_multipass_dfa_lookahead
34936 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
34937 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
34940 #undef TARGET_HAVE_TLS
34941 #define TARGET_HAVE_TLS true
34943 #undef TARGET_CANNOT_FORCE_CONST_MEM
34944 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
34945 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
34946 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
34948 #undef TARGET_DELEGITIMIZE_ADDRESS
34949 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
34951 #undef TARGET_MS_BITFIELD_LAYOUT_P
34952 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
34955 #undef TARGET_BINDS_LOCAL_P
34956 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
34958 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
34959 #undef TARGET_BINDS_LOCAL_P
34960 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
34963 #undef TARGET_ASM_OUTPUT_MI_THUNK
34964 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
34965 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
34966 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
34968 #undef TARGET_ASM_FILE_START
34969 #define TARGET_ASM_FILE_START x86_file_start
34971 #undef TARGET_OPTION_OVERRIDE
34972 #define TARGET_OPTION_OVERRIDE ix86_option_override
34974 #undef TARGET_REGISTER_MOVE_COST
34975 #define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
34976 #undef TARGET_MEMORY_MOVE_COST
34977 #define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
34978 #undef TARGET_RTX_COSTS
34979 #define TARGET_RTX_COSTS ix86_rtx_costs
34980 #undef TARGET_ADDRESS_COST
34981 #define TARGET_ADDRESS_COST ix86_address_cost
34983 #undef TARGET_FIXED_CONDITION_CODE_REGS
34984 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
34985 #undef TARGET_CC_MODES_COMPATIBLE
34986 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
34988 #undef TARGET_MACHINE_DEPENDENT_REORG
34989 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
34991 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
34992 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
34994 #undef TARGET_BUILD_BUILTIN_VA_LIST
34995 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
34997 #undef TARGET_ENUM_VA_LIST_P
34998 #define TARGET_ENUM_VA_LIST_P ix86_enum_va_list
35000 #undef TARGET_FN_ABI_VA_LIST
35001 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
35003 #undef TARGET_CANONICAL_VA_LIST_TYPE
35004 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
35006 #undef TARGET_EXPAND_BUILTIN_VA_START
35007 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
35009 #undef TARGET_MD_ASM_CLOBBERS
35010 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
35012 #undef TARGET_PROMOTE_PROTOTYPES
35013 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
35014 #undef TARGET_STRUCT_VALUE_RTX
35015 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
35016 #undef TARGET_SETUP_INCOMING_VARARGS
35017 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
35018 #undef TARGET_MUST_PASS_IN_STACK
35019 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
35020 #undef TARGET_FUNCTION_ARG_ADVANCE
35021 #define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
35022 #undef TARGET_FUNCTION_ARG
35023 #define TARGET_FUNCTION_ARG ix86_function_arg
35024 #undef TARGET_FUNCTION_ARG_BOUNDARY
35025 #define TARGET_FUNCTION_ARG_BOUNDARY ix86_function_arg_boundary
35026 #undef TARGET_PASS_BY_REFERENCE
35027 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
35028 #undef TARGET_INTERNAL_ARG_POINTER
35029 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
35030 #undef TARGET_UPDATE_STACK_BOUNDARY
35031 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
35032 #undef TARGET_GET_DRAP_RTX
35033 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
35034 #undef TARGET_STRICT_ARGUMENT_NAMING
35035 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
35036 #undef TARGET_STATIC_CHAIN
35037 #define TARGET_STATIC_CHAIN ix86_static_chain
35038 #undef TARGET_TRAMPOLINE_INIT
35039 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
35040 #undef TARGET_RETURN_POPS_ARGS
35041 #define TARGET_RETURN_POPS_ARGS ix86_return_pops_args
35043 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
35044 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
35046 #undef TARGET_SCALAR_MODE_SUPPORTED_P
35047 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
35049 #undef TARGET_VECTOR_MODE_SUPPORTED_P
35050 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
35052 #undef TARGET_C_MODE_FOR_SUFFIX
35053 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
35056 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
35057 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
35060 #ifdef SUBTARGET_INSERT_ATTRIBUTES
35061 #undef TARGET_INSERT_ATTRIBUTES
35062 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
35065 #undef TARGET_MANGLE_TYPE
35066 #define TARGET_MANGLE_TYPE ix86_mangle_type
35068 #ifndef TARGET_MACHO
35069 #undef TARGET_STACK_PROTECT_FAIL
35070 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
35073 #undef TARGET_FUNCTION_VALUE
35074 #define TARGET_FUNCTION_VALUE ix86_function_value
35076 #undef TARGET_FUNCTION_VALUE_REGNO_P
35077 #define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p
35079 #undef TARGET_PROMOTE_FUNCTION_MODE
35080 #define TARGET_PROMOTE_FUNCTION_MODE ix86_promote_function_mode
35082 #undef TARGET_SECONDARY_RELOAD
35083 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
35085 #undef TARGET_CLASS_MAX_NREGS
35086 #define TARGET_CLASS_MAX_NREGS ix86_class_max_nregs
35088 #undef TARGET_PREFERRED_RELOAD_CLASS
35089 #define TARGET_PREFERRED_RELOAD_CLASS ix86_preferred_reload_class
35090 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
35091 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS ix86_preferred_output_reload_class
35092 #undef TARGET_CLASS_LIKELY_SPILLED_P
35093 #define TARGET_CLASS_LIKELY_SPILLED_P ix86_class_likely_spilled_p
35095 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
35096 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
35097 ix86_builtin_vectorization_cost
35098 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
35099 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
35100 ix86_vectorize_builtin_vec_perm
35101 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
35102 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
35103 ix86_vectorize_builtin_vec_perm_ok
35104 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
35105 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
35106 ix86_preferred_simd_mode
35107 #undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
35108 #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
35109 ix86_autovectorize_vector_sizes
35111 #undef TARGET_SET_CURRENT_FUNCTION
35112 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
35114 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
35115 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
35117 #undef TARGET_OPTION_SAVE
35118 #define TARGET_OPTION_SAVE ix86_function_specific_save
35120 #undef TARGET_OPTION_RESTORE
35121 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
35123 #undef TARGET_OPTION_PRINT
35124 #define TARGET_OPTION_PRINT ix86_function_specific_print
35126 #undef TARGET_CAN_INLINE_P
35127 #define TARGET_CAN_INLINE_P ix86_can_inline_p
35129 #undef TARGET_EXPAND_TO_RTL_HOOK
35130 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
35132 #undef TARGET_LEGITIMATE_ADDRESS_P
35133 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
35135 #undef TARGET_LEGITIMATE_CONSTANT_P
35136 #define TARGET_LEGITIMATE_CONSTANT_P ix86_legitimate_constant_p
35138 #undef TARGET_FRAME_POINTER_REQUIRED
35139 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
35141 #undef TARGET_CAN_ELIMINATE
35142 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
35144 #undef TARGET_EXTRA_LIVE_ON_ENTRY
35145 #define TARGET_EXTRA_LIVE_ON_ENTRY ix86_live_on_entry
35147 #undef TARGET_ASM_CODE_END
35148 #define TARGET_ASM_CODE_END ix86_code_end
35150 #undef TARGET_CONDITIONAL_REGISTER_USAGE
35151 #define TARGET_CONDITIONAL_REGISTER_USAGE ix86_conditional_register_usage
35154 #undef TARGET_INIT_LIBFUNCS
35155 #define TARGET_INIT_LIBFUNCS darwin_rename_builtins
35158 struct gcc_target targetm = TARGET_INITIALIZER;
35160 #include "gt-i386.h"