/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
#include "tree-gimple.h"
#include "dwarf2.h"
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
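
/* Illustrative sketch, not part of the original file: MODE_INDEX selects
   the entry of the five-element cost arrays below (QI, HI, SI, DI, and
   everything wider).  Assuming the struct processor_costs field names
   from i386.h (e.g. mult_init and divide), a query for the SImode
   multiply start cost of the active tuning would look like this.  */
#if 0 /* example only */
static inline int
example_simode_mult_cost (void)
{
  return ix86_cost->mult_init[MODE_INDEX (SImode)];
}
#endif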
/* Processor costs (relative to an add) */
static const
struct processor_costs size_cost = { /* costs for tuning for size */
  2, /* cost of an add instruction */
  3, /* cost of a lea instruction */
  2, /* variable shift costs */
  3, /* constant shift costs */
  {3, 3, 3, 3, 5}, /* cost of starting a multiply */
  0, /* cost of multiply per each bit set */
  {3, 3, 3, 3, 5}, /* cost of a divide/mod */
  3, /* cost of movsx */
  3, /* cost of movzx */
  0, /* "large" insn */
  2, /* MOVE_RATIO */
  2, /* cost for loading QImode using movzbl */
  {2, 2, 2}, /* cost of loading integer registers
		in QImode, HImode and SImode.
		Relative to reg-reg move (2). */
  {2, 2, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 2}, /* cost of loading fp registers
		in SFmode, DFmode and XFmode */
  {2, 2, 2}, /* cost of storing fp registers
		in SFmode, DFmode and XFmode */
  3, /* cost of moving MMX register */
  {3, 3}, /* cost of loading MMX registers
	     in SImode and DImode */
  {3, 3}, /* cost of storing MMX registers
	     in SImode and DImode */
  3, /* cost of moving SSE register */
  {3, 3, 3}, /* cost of loading SSE registers
		in SImode, DImode and TImode */
  {3, 3, 3}, /* cost of storing SSE registers
		in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  1, /* Branch cost */
  2, /* cost of FADD and FSUB insns. */
  2, /* cost of FMUL instruction. */
  2, /* cost of FDIV instruction. */
  2, /* cost of FABS instruction. */
  2, /* cost of FCHS instruction. */
  2, /* cost of FSQRT instruction. */
};
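
/* Note on units: when tuning for size the table above approximates
   instruction encoding length in bytes rather than latency, which is why
   lea (3) ranks above add (2) here while the latency-oriented tables
   below often rate the two equal.  */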
/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = { /* 386 specific costs */
  1, /* cost of an add instruction */
  1, /* cost of a lea instruction */
  3, /* variable shift costs */
  2, /* constant shift costs */
  {6, 6, 6, 6, 6}, /* cost of starting a multiply */
  1, /* cost of multiply per each bit set */
  {23, 23, 23, 23, 23}, /* cost of a divide/mod */
  3, /* cost of movsx */
  2, /* cost of movzx */
  15, /* "large" insn */
  3, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
		in QImode, HImode and SImode.
		Relative to reg-reg move (2). */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {8, 8, 8}, /* cost of loading fp registers
		in SFmode, DFmode and XFmode */
  {8, 8, 8}, /* cost of storing fp registers
		in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 8}, /* cost of loading MMX registers
	     in SImode and DImode */
  {4, 8}, /* cost of storing MMX registers
	     in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
		 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
		 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  1, /* Branch cost */
  23, /* cost of FADD and FSUB insns. */
  27, /* cost of FMUL instruction. */
  88, /* cost of FDIV instruction. */
  22, /* cost of FABS instruction. */
  24, /* cost of FCHS instruction. */
  122, /* cost of FSQRT instruction. */
};
static const
struct processor_costs i486_cost = { /* 486 specific costs */
  1, /* cost of an add instruction */
  1, /* cost of a lea instruction */
  3, /* variable shift costs */
  2, /* constant shift costs */
  {12, 12, 12, 12, 12}, /* cost of starting a multiply */
  1, /* cost of multiply per each bit set */
  {40, 40, 40, 40, 40}, /* cost of a divide/mod */
  3, /* cost of movsx */
  2, /* cost of movzx */
  15, /* "large" insn */
  3, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
		in QImode, HImode and SImode.
		Relative to reg-reg move (2). */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {8, 8, 8}, /* cost of loading fp registers
		in SFmode, DFmode and XFmode */
  {8, 8, 8}, /* cost of storing fp registers
		in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 8}, /* cost of loading MMX registers
	     in SImode and DImode */
  {4, 8}, /* cost of storing MMX registers
	     in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
		 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
		 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  1, /* Branch cost */
  8, /* cost of FADD and FSUB insns. */
  16, /* cost of FMUL instruction. */
  73, /* cost of FDIV instruction. */
  3, /* cost of FABS instruction. */
  3, /* cost of FCHS instruction. */
  83, /* cost of FSQRT instruction. */
};
static const
struct processor_costs pentium_cost = {
  1, /* cost of an add instruction */
  1, /* cost of a lea instruction */
  4, /* variable shift costs */
  1, /* constant shift costs */
  {11, 11, 11, 11, 11}, /* cost of starting a multiply */
  0, /* cost of multiply per each bit set */
  {25, 25, 25, 25, 25}, /* cost of a divide/mod */
  3, /* cost of movsx */
  2, /* cost of movzx */
  8, /* "large" insn */
  6, /* MOVE_RATIO */
  6, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
		in QImode, HImode and SImode.
		Relative to reg-reg move (2). */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
		in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
		in SFmode, DFmode and XFmode */
  8, /* cost of moving MMX register */
  {8, 8}, /* cost of loading MMX registers
	     in SImode and DImode */
  {8, 8}, /* cost of storing MMX registers
	     in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
		 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
		 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  2, /* Branch cost */
  3, /* cost of FADD and FSUB insns. */
  3, /* cost of FMUL instruction. */
  39, /* cost of FDIV instruction. */
  1, /* cost of FABS instruction. */
  1, /* cost of FCHS instruction. */
  70, /* cost of FSQRT instruction. */
};
static const
struct processor_costs pentiumpro_cost = {
  1, /* cost of an add instruction */
  1, /* cost of a lea instruction */
  1, /* variable shift costs */
  1, /* constant shift costs */
  {4, 4, 4, 4, 4}, /* cost of starting a multiply */
  0, /* cost of multiply per each bit set */
  {17, 17, 17, 17, 17}, /* cost of a divide/mod */
  1, /* cost of movsx */
  1, /* cost of movzx */
  8, /* "large" insn */
  6, /* MOVE_RATIO */
  2, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
		in QImode, HImode and SImode.
		Relative to reg-reg move (2). */
  {2, 2, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
		in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
		in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
	     in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
	     in SImode and DImode */
  2, /* cost of moving SSE register */
  {2, 2, 8}, /* cost of loading SSE registers
		in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
		in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  32, /* size of prefetch block */
  6, /* number of parallel prefetches */
  2, /* Branch cost */
  3, /* cost of FADD and FSUB insns. */
  5, /* cost of FMUL instruction. */
  56, /* cost of FDIV instruction. */
  2, /* cost of FABS instruction. */
  2, /* cost of FCHS instruction. */
  56, /* cost of FSQRT instruction. */
};
static const
struct processor_costs k6_cost = {
  1, /* cost of an add instruction */
  2, /* cost of a lea instruction */
  1, /* variable shift costs */
  1, /* constant shift costs */
  {3, 3, 3, 3, 3}, /* cost of starting a multiply */
  0, /* cost of multiply per each bit set */
  {18, 18, 18, 18, 18}, /* cost of a divide/mod */
  2, /* cost of movsx */
  2, /* cost of movzx */
  8, /* "large" insn */
  4, /* MOVE_RATIO */
  3, /* cost for loading QImode using movzbl */
  {4, 5, 4}, /* cost of loading integer registers
		in QImode, HImode and SImode.
		Relative to reg-reg move (2). */
  {2, 3, 2}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {6, 6, 6}, /* cost of loading fp registers
		in SFmode, DFmode and XFmode */
  {4, 4, 4}, /* cost of storing fp registers
		in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
	     in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
	     in SImode and DImode */
  2, /* cost of moving SSE register */
  {2, 2, 8}, /* cost of loading SSE registers
		in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
		in SImode, DImode and TImode */
  6, /* MMX or SSE register to integer */
  32, /* size of prefetch block */
  1, /* number of parallel prefetches */
  1, /* Branch cost */
  2, /* cost of FADD and FSUB insns. */
  2, /* cost of FMUL instruction. */
  56, /* cost of FDIV instruction. */
  2, /* cost of FABS instruction. */
  2, /* cost of FCHS instruction. */
  56, /* cost of FSQRT instruction. */
};
static const
struct processor_costs athlon_cost = {
  1, /* cost of an add instruction */
  2, /* cost of a lea instruction */
  1, /* variable shift costs */
  1, /* constant shift costs */
  {5, 5, 5, 5, 5}, /* cost of starting a multiply */
  0, /* cost of multiply per each bit set */
  {18, 26, 42, 74, 74}, /* cost of a divide/mod */
  1, /* cost of movsx */
  1, /* cost of movzx */
  8, /* "large" insn */
  9, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {3, 4, 3}, /* cost of loading integer registers
		in QImode, HImode and SImode.
		Relative to reg-reg move (2). */
  {3, 4, 3}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {4, 4, 12}, /* cost of loading fp registers
		 in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
		in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 4}, /* cost of loading MMX registers
	     in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
	     in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 4, 6}, /* cost of loading SSE registers
		in SImode, DImode and TImode */
  {4, 4, 5}, /* cost of storing SSE registers
		in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  5, /* Branch cost */
  4, /* cost of FADD and FSUB insns. */
  4, /* cost of FMUL instruction. */
  24, /* cost of FDIV instruction. */
  2, /* cost of FABS instruction. */
  2, /* cost of FCHS instruction. */
  35, /* cost of FSQRT instruction. */
};
static const
struct processor_costs k8_cost = {
  1, /* cost of an add instruction */
  2, /* cost of a lea instruction */
  1, /* variable shift costs */
  1, /* constant shift costs */
  {3, 4, 3, 4, 5}, /* cost of starting a multiply */
  0, /* cost of multiply per each bit set */
  {18, 26, 42, 74, 74}, /* cost of a divide/mod */
  1, /* cost of movsx */
  1, /* cost of movzx */
  8, /* "large" insn */
  9, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {3, 4, 3}, /* cost of loading integer registers
		in QImode, HImode and SImode.
		Relative to reg-reg move (2). */
  {3, 4, 3}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {4, 4, 12}, /* cost of loading fp registers
		 in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
		in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {3, 3}, /* cost of loading MMX registers
	     in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
	     in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 3, 6}, /* cost of loading SSE registers
		in SImode, DImode and TImode */
  {4, 4, 5}, /* cost of storing SSE registers
		in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  5, /* Branch cost */
  4, /* cost of FADD and FSUB insns. */
  4, /* cost of FMUL instruction. */
  19, /* cost of FDIV instruction. */
  2, /* cost of FABS instruction. */
  2, /* cost of FCHS instruction. */
  35, /* cost of FSQRT instruction. */
};
static const
struct processor_costs pentium4_cost = {
  1, /* cost of an add instruction */
  3, /* cost of a lea instruction */
  4, /* variable shift costs */
  4, /* constant shift costs */
  {15, 15, 15, 15, 15}, /* cost of starting a multiply */
  0, /* cost of multiply per each bit set */
  {56, 56, 56, 56, 56}, /* cost of a divide/mod */
  1, /* cost of movsx */
  1, /* cost of movzx */
  16, /* "large" insn */
  6, /* MOVE_RATIO */
  2, /* cost for loading QImode using movzbl */
  {4, 5, 4}, /* cost of loading integer registers
		in QImode, HImode and SImode.
		Relative to reg-reg move (2). */
  {2, 3, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
		in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
		in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
	     in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
	     in SImode and DImode */
  12, /* cost of moving SSE register */
  {12, 12, 12}, /* cost of loading SSE registers
		   in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
		in SImode, DImode and TImode */
  10, /* MMX or SSE register to integer */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  2, /* Branch cost */
  5, /* cost of FADD and FSUB insns. */
  7, /* cost of FMUL instruction. */
  43, /* cost of FDIV instruction. */
  2, /* cost of FABS instruction. */
  2, /* cost of FCHS instruction. */
  43, /* cost of FSQRT instruction. */
};
static const
struct processor_costs nocona_cost = {
  1, /* cost of an add instruction */
  1, /* cost of a lea instruction */
  1, /* variable shift costs */
  1, /* constant shift costs */
  {10, 10, 10, 10, 10}, /* cost of starting a multiply */
  0, /* cost of multiply per each bit set */
  {66, 66, 66, 66, 66}, /* cost of a divide/mod */
  1, /* cost of movsx */
  1, /* cost of movzx */
  16, /* "large" insn */
  17, /* MOVE_RATIO */
  4, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
		in QImode, HImode and SImode.
		Relative to reg-reg move (2). */
  {4, 4, 4}, /* cost of storing integer registers */
  3, /* cost of reg,reg fld/fst */
  {12, 12, 12}, /* cost of loading fp registers
		   in SFmode, DFmode and XFmode */
  {4, 4, 4}, /* cost of storing fp registers
		in SFmode, DFmode and XFmode */
  6, /* cost of moving MMX register */
  {12, 12}, /* cost of loading MMX registers
	       in SImode and DImode */
  {12, 12}, /* cost of storing MMX registers
	       in SImode and DImode */
  6, /* cost of moving SSE register */
  {12, 12, 12}, /* cost of loading SSE registers
		   in SImode, DImode and TImode */
  {12, 12, 12}, /* cost of storing SSE registers
		   in SImode, DImode and TImode */
  8, /* MMX or SSE register to integer */
  128, /* size of prefetch block */
  8, /* number of parallel prefetches */
  1, /* Branch cost */
  6, /* cost of FADD and FSUB insns. */
  8, /* cost of FMUL instruction. */
  40, /* cost of FDIV instruction. */
  3, /* cost of FABS instruction. */
  3, /* cost of FCHS instruction. */
  44, /* cost of FSQRT instruction. */
};
const struct processor_costs *ix86_cost = &pentium_cost;

/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_K6 (1<<PROCESSOR_K6)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_zero_extend_with_and = m_486 | m_PENT;
const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
const int x86_double_with_add = ~m_386;
const int x86_use_bit_test = m_386;
const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_fisttp = m_NOCONA;
const int x86_3dnow_a = m_ATHLON_K8;
const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
/* Branch hints were put in P4 based on simulation results.  But after
   P4 was made, no performance benefit was observed with branch hints;
   they also increase code size.  As a result, icc never generates
   branch hints.  */
const int x86_branch_hints = 0;
const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
const int x86_partial_reg_stall = m_PPRO;
const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
const int x86_use_mov0 = m_K6;
const int x86_use_cltd = ~(m_PENT | m_K6);
const int x86_read_modify_write = ~m_PENT;
const int x86_read_modify = ~(m_PENT | m_PPRO);
const int x86_split_long_moves = m_PPRO;
const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
const int x86_qimode_math = ~(0);
const int x86_promote_qi_regs = 0;
const int x86_himode_math = ~(m_PPRO);
const int x86_promote_hi_regs = m_PPRO;
const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
const int x86_decompose_lea = m_PENT4 | m_NOCONA;
const int x86_shift1 = ~m_486;
const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
/* Set for machines where the type and dependencies are resolved on SSE
   register parts instead of whole registers, so we may maintain just
   lower part of scalar values in proper format leaving the upper part
   undefined.  */
const int x86_sse_split_regs = m_ATHLON_K8;
const int x86_sse_typeless_stores = m_ATHLON_K8;
const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
const int x86_use_ffreep = m_ATHLON_K8;
const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;

/* ??? Allowing interunit moves makes it all too easy for the compiler to
   put integer data in xmm registers, which results in pretty abysmal
   code.  */
const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;

const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
/* Some CPU cores are not able to predict more than 4 branch instructions
   in the 16-byte window.  */
const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
const int x86_use_bt = m_ATHLON_K8;
/* Compare and exchange was added for 80486.  */
const int x86_cmpxchg = ~m_386;
/* Exchange and add was added for 80486.  */
const int x86_xadd = ~m_386;
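
/* A minimal sketch of how these bitmasks are consumed (mirroring the
   TARGET_* test macros in i386.h, where TUNEMASK is (1 << ix86_tune)):
   each flag holds one bit per processor and is tested against the bit
   of the processor currently being tuned for.  Macro names here are
   illustrative only.  */
#if 0 /* example only */
#define EXAMPLE_TUNEMASK (1 << ix86_tune)
#define EXAMPLE_USE_LEAVE (x86_use_leave & EXAMPLE_TUNEMASK)
#endif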
/* In case the average insn count for a single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue.  */
#define FAST_PROLOGUE_INSN_COUNT 20
/* Names for 8 (low), 8 (high), and 16-bit registers, respectively.  */
static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  /* ax, dx, cx, bx */
  AREG, DREG, CREG, BREG,
  /* si, di, bp, sp */
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  /* FP registers */
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* arg pointer */
  NON_Q_REGS,
  /* flags, fpsr, dirflag, frame */
  NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  MMX_REGS, MMX_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS,
};
/* The "default" register map used in 32-bit mode.  */

int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
  12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
  -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
  21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
  29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
  -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
};
static int const x86_64_int_parameter_registers[6] =
{
  5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
  FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
};

static int const x86_64_int_return_registers[4] =
{
  0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
};
/* The "default" register map used in 64-bit mode.  */
int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
  33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
  -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
  17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
  41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
  8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
  25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
};
/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believes these numbers have these meanings.
	8 for %eip (no gcc equivalent)
	9 for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 is so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.
   The version of x86/svr4 SDB I have at the moment does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seems to
   understand that it should say that a variable lives in %st(0) (when
   asked via an `=' command) if we said it was in DWARF regno 11,
   but SDB still prints garbage when asked for the value of the
   variable in question (via a `/' command).
   (Also note that the labels SDB prints for various FP stack regs
   when doing an `x' command are all wrong.)
   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.
   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers.
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
*/
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
  11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
  -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
  21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
  29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
  -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
};
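
/* Sketch of how the maps above are consumed (a condensation of the
   DBX_REGISTER_NUMBER definition in i386.h; the macro name here is
   illustrative): debug output translates GCC's internal register number
   into the debugger's numbering, with -1 marking registers that have no
   assigned number.  */
#if 0 /* example only */
#define EXAMPLE_DBX_REGNO(n) \
  (TARGET_64BIT ? dbx64_register_map[n] : dbx_register_map[n])
#endif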
/* Test and compare insns in i386.md store the information needed to
   generate branch and scc insns here.  */

rtx ix86_compare_op0 = NULL_RTX;
rtx ix86_compare_op1 = NULL_RTX;
rtx ix86_compare_emitted = NULL_RTX;
/* Size of the register save area.  */
#define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
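
/* Worked example, using the x86-64 values from i386.h (REGPARM_MAX = 6,
   UNITS_PER_WORD = 8, SSE_REGPARM_MAX = 8): 6*8 + 8*16 = 176 bytes of
   register save area for varargs functions.  */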
/* Define the structure for the machine field in struct function.  */

struct stack_local_entry GTY(())
{
  unsigned short mode;
  unsigned short n;
  rtx rtl;
  struct stack_local_entry *next;
};
/* Structure describing stack frame layout.
   Stack grows downward:

   [arguments]
					      <- ARG_POINTER
   saved pc

   saved frame pointer if frame_pointer_needed
					      <- HARD_FRAME_POINTER
   [saved regs]

   [padding1]          \
			)
   [va_arg registers]  (
			> to_allocate	      <- FRAME_POINTER
   [frame]	       (
			)
   [padding2]	       /
  */
struct ix86_frame
{
  int nregs;
  int padding1;
  int va_arg_size;
  HOST_WIDE_INT frame;
  int padding2;
  int outgoing_arguments_size;
  int red_zone_size;

  HOST_WIDE_INT to_allocate;
  /* The offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT hard_frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* When save_regs_using_mov is set, emit prologue using
     move instead of push instructions.  */
  bool save_regs_using_mov;
};
/* Code model option.  */
enum cmodel ix86_cmodel;
/* Asm dialect.  */
enum asm_dialect ix86_asm_dialect = ASM_ATT;
/* TLS dialect.  */
enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;

/* Which unit we are generating floating point math for.  */
enum fpmath_unit ix86_fpmath;

/* Which CPU we are scheduling for.  */
enum processor_type ix86_tune;
/* Which instruction set architecture to use.  */
enum processor_type ix86_arch;

/* True if the SSE prefetch instruction is not a NOP.  */
int x86_prefetch_sse;

/* ix86_regparm_string as a number.  */
static int ix86_regparm;

/* Preferred alignment for stack boundary in bits.  */
unsigned int ix86_preferred_stack_boundary;

/* Values 1-5: see jump.c.  */
int ix86_branch_cost;

/* Prefix built by ASM_GENERATE_INTERNAL_LABEL.  */
char internal_label_prefix[16];
int internal_label_prefix_len;
static bool ix86_handle_option (size_t, const char *, int);
static void output_pic_addr_const (FILE *, rtx, int);
static void put_condition_code (enum rtx_code, enum machine_mode,
				int, int, FILE *);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
						   rtx *);
static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
						   enum machine_mode);
static rtx get_thread_pointer (int);
static rtx legitimize_tls_address (rtx, enum tls_model, int);
static void get_pc_thunk_name (char [32], unsigned int);
static rtx gen_push (rtx);
static int ix86_flags_dependant (rtx, rtx, enum attr_type);
static int ix86_agi_dependant (rtx, rtx, enum attr_type);
static struct machine_function * ix86_init_machine_status (void);
static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
static int ix86_nsaved_regs (void);
static void ix86_emit_save_regs (void);
static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
static HOST_WIDE_INT ix86_GOT_alias_set (void);
static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
static rtx ix86_expand_aligntest (rtx, int);
static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
static int ix86_issue_rate (void);
static int ix86_adjust_cost (rtx, rtx, rtx, int);
static int ia32_multipass_dfa_lookahead (void);
static void ix86_init_mmx_sse_builtins (void);
static rtx x86_this_parameter (tree);
static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				 HOST_WIDE_INT, tree);
static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void x86_file_start (void);
static void ix86_reorg (void);
static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
static tree ix86_build_builtin_va_list (void);
static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					 tree, int *, int);
static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
static bool ix86_vector_mode_supported_p (enum machine_mode);

static int ix86_address_cost (rtx);
static bool ix86_cannot_force_const_mem (rtx);
static rtx ix86_delegitimize_address (rtx);

static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;

struct builtin_description;
static rtx ix86_expand_sse_comi (const struct builtin_description *,
				 tree, rtx);
static rtx ix86_expand_sse_compare (const struct builtin_description *,
				    tree, rtx);
static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_store_builtin (enum insn_code, tree);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
static int ix86_fp_comparison_cost (enum rtx_code code);
static unsigned int ix86_select_alt_pic_regnum (void);
static int ix86_save_reg (unsigned int, int);
static void ix86_compute_frame_layout (struct ix86_frame *);
static int ix86_comp_type_attributes (tree, tree);
static int ix86_function_regparm (tree, tree);
const struct attribute_spec ix86_attribute_table[];
static bool ix86_function_ok_for_sibcall (tree, tree);
static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
static int ix86_value_regno (enum machine_mode, tree);
static bool contains_128bit_aligned_vector_p (tree);
static rtx ix86_struct_value_rtx (tree, int);
static bool ix86_ms_bitfield_layout_p (tree);
static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
static int extended_reg_mentioned_1 (rtx *, void *);
static bool ix86_rtx_costs (rtx, int, int, int *);
static int min_insn_size (rtx);
static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				    tree, bool);
static void ix86_init_builtins (void);
static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);

/* This function is only used on Solaris.  */
static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
     ATTRIBUTE_UNUSED;
/* Register class used for passing given 64bit part of the argument.
   These represent classes as documented by the PS ABI, with the exception
   of SSESF, SSEDF classes, that are basically SSE class, just gcc will
   use SF or DFmode move instead of DImode to avoid reformatting penalties.

   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
   whenever possible (upper half does contain padding).
 */
enum x86_64_reg_class
  {
    X86_64_NO_CLASS,
    X86_64_INTEGER_CLASS,
    X86_64_INTEGERSI_CLASS,
    X86_64_SSE_CLASS,
    X86_64_SSESF_CLASS,
    X86_64_SSEDF_CLASS,
    X86_64_SSEUP_CLASS,
    X86_64_X87_CLASS,
    X86_64_X87UP_CLASS,
    X86_64_COMPLEX_X87_CLASS,
    X86_64_MEMORY_CLASS
  };
static const char * const x86_64_reg_class_name[] = {
  "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
  "sseup", "x87", "x87up", "cplx87", "no"
};

#define MAX_CLASSES 4
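
/* Example of the classification these values express (per the x86-64
   psABI): struct { double d; int i; } occupies two eightbytes; the
   first is classified X86_64_SSEDF_CLASS (passed in an SSE register via
   a DFmode move), the second X86_64_INTEGERSI_CLASS (an SImode move
   into a general register, since its upper half is padding).  */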
/* Table of constants used by fldpi, fldln2, etc....  */
static REAL_VALUE_TYPE ext_80387_constants_table [5];
static bool ext_80387_constants_init = 0;
static void init_ext_80387_constants (void);
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
# undef TARGET_MERGE_DECL_ATTRIBUTES
# define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT \
   | TARGET_64BIT_DEFAULT \
   | TARGET_SUBTARGET_DEFAULT \
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ix86_handle_option

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
struct gcc_target targetm = TARGET_INITIALIZER;
/* The svr4 ABI for the i386 says that records and unions are returned
   in memory.  */
#ifndef DEFAULT_PCC_STRUCT_RETURN
#define DEFAULT_PCC_STRUCT_RETURN 1
#endif
/* Implement TARGET_HANDLE_OPTION.  */

static bool
ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
{
  switch (code)
    {
    case OPT_m3dnow:
      if (!value)
	{
	  /* Disabling 3DNow! also disables the 3DNow!A extensions.  */
	  target_flags &= ~MASK_3DNOW_A;
	  target_flags_explicit |= MASK_3DNOW_A;
	}
      return true;

    case OPT_mmmx:
      if (!value)
	{
	  /* Disabling MMX disables 3DNow! and 3DNow!A as well.  */
	  target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
	  target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
	}
      return true;

    case OPT_msse:
      if (!value)
	{
	  /* Disabling SSE disables SSE2 and SSE3 as well.  */
	  target_flags &= ~(MASK_SSE2 | MASK_SSE3);
	  target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
	}
      return true;

    case OPT_msse2:
      if (!value)
	{
	  /* Disabling SSE2 disables SSE3 as well.  */
	  target_flags &= ~MASK_SSE3;
	  target_flags_explicit |= MASK_SSE3;
	}
      return true;

    default:
      return true;
    }
}
/* Sometimes certain combinations of command options do not make
   sense on a particular target machine.  You can define a macro
   `OVERRIDE_OPTIONS' to take account of this.  This macro, if
   defined, is executed once just after all the command options have
   been parsed.

   Don't use this macro to turn on various extra optimizations for
   `-O'.  That is what `OPTIMIZATION_OPTIONS' is for.  */
void
override_options (void)
{
  int i;
  int ix86_tune_defaulted = 0;

  /* Comes from final.c -- no real reason to change it.  */
#define MAX_CODE_ALIGN 16
  static struct ptt
    {
      const struct processor_costs *cost; /* Processor costs */
      const int target_enable; /* Target flags to enable.  */
      const int target_disable; /* Target flags to disable.  */
      const int align_loop; /* Default alignments.  */
      const int align_loop_max_skip;
      const int align_jump;
      const int align_jump_max_skip;
      const int align_func;
    }
  const processor_target_table[PROCESSOR_max] =
    {
      {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
      {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
      {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
      {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
      {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
      {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
      {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
      {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
      {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
    };
  static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
  static struct pta
    {
      const char *const name; /* processor name or nickname.  */
      const enum processor_type processor;
      const enum pta_flags
	{
	  PTA_SSE = 1,
	  PTA_SSE2 = 2,
	  PTA_SSE3 = 4,
	  PTA_MMX = 8,
	  PTA_PREFETCH_SSE = 16,
	  PTA_3DNOW = 32,
	  PTA_3DNOW_A = 64,
	  PTA_64BIT = 128
	} flags;
    }
  const processor_alias_table[] =
    {
      {"i386", PROCESSOR_I386, 0},
      {"i486", PROCESSOR_I486, 0},
      {"i586", PROCESSOR_PENTIUM, 0},
      {"pentium", PROCESSOR_PENTIUM, 0},
      {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
      {"winchip-c6", PROCESSOR_I486, PTA_MMX},
      {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
      {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
      {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
      {"i686", PROCESSOR_PENTIUMPRO, 0},
      {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
      {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
      {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
      {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
      {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
      {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
				       | PTA_MMX | PTA_PREFETCH_SSE},
      {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
					| PTA_MMX | PTA_PREFETCH_SSE},
      {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
				     | PTA_MMX | PTA_PREFETCH_SSE},
      {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
				   | PTA_MMX | PTA_PREFETCH_SSE},
      {"k6", PROCESSOR_K6, PTA_MMX},
      {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
      {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
      {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				   | PTA_3DNOW_A},
      {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
					 | PTA_3DNOW | PTA_3DNOW_A},
      {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				     | PTA_3DNOW_A | PTA_SSE},
      {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				      | PTA_3DNOW_A | PTA_SSE},
      {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				      | PTA_3DNOW_A | PTA_SSE},
      {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
			       | PTA_SSE | PTA_SSE2 },
      {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
			   | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
				| PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
				 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
				  | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
    };

  int const pta_size = ARRAY_SIZE (processor_alias_table);
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
  /* Set the default values for switches whose default depends on TARGET_64BIT
     in case they weren't overwritten by command line options.  */
  if (TARGET_64BIT)
    {
      if (flag_omit_frame_pointer == 2)
	flag_omit_frame_pointer = 1;
      if (flag_asynchronous_unwind_tables == 2)
	flag_asynchronous_unwind_tables = 1;
      if (flag_pcc_struct_return == 2)
	flag_pcc_struct_return = 0;
    }
  else
    {
      if (flag_omit_frame_pointer == 2)
	flag_omit_frame_pointer = 0;
      if (flag_asynchronous_unwind_tables == 2)
	flag_asynchronous_unwind_tables = 0;
      if (flag_pcc_struct_return == 2)
	flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
    }
  if (!ix86_tune_string && ix86_arch_string)
    ix86_tune_string = ix86_arch_string;
  if (!ix86_tune_string)
    {
      ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
      ix86_tune_defaulted = 1;
    }
  if (!ix86_arch_string)
    ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
  if (ix86_cmodel_string != 0)
    {
      if (!strcmp (ix86_cmodel_string, "small"))
	ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
      else if (flag_pic)
	sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
      else if (!strcmp (ix86_cmodel_string, "32"))
	ix86_cmodel = CM_32;
      else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
	ix86_cmodel = CM_KERNEL;
      else if (!strcmp (ix86_cmodel_string, "medium") && !flag_pic)
	ix86_cmodel = CM_MEDIUM;
      else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
	ix86_cmodel = CM_LARGE;
      else
	error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
    }
  else
    {
      ix86_cmodel = CM_32;
      if (TARGET_64BIT)
	ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
    }
  if (ix86_asm_string != 0)
    {
      if (!strcmp (ix86_asm_string, "intel"))
	ix86_asm_dialect = ASM_INTEL;
      else if (!strcmp (ix86_asm_string, "att"))
	ix86_asm_dialect = ASM_ATT;
      else
	error ("bad value (%s) for -masm= switch", ix86_asm_string);
    }
  if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
    error ("code model %qs not supported in the %s bit mode",
	   ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
  if (ix86_cmodel == CM_LARGE)
    sorry ("code model %<large%> not supported yet");
  if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
    sorry ("%i-bit mode not compiled in",
	   (target_flags & MASK_64BIT) ? 64 : 32);
  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
      {
	ix86_arch = processor_alias_table[i].processor;
	/* Default cpu tuning to the architecture.  */
	ix86_tune = ix86_arch;
	if (processor_alias_table[i].flags & PTA_MMX
	    && !(target_flags_explicit & MASK_MMX))
	  target_flags |= MASK_MMX;
	if (processor_alias_table[i].flags & PTA_3DNOW
	    && !(target_flags_explicit & MASK_3DNOW))
	  target_flags |= MASK_3DNOW;
	if (processor_alias_table[i].flags & PTA_3DNOW_A
	    && !(target_flags_explicit & MASK_3DNOW_A))
	  target_flags |= MASK_3DNOW_A;
	if (processor_alias_table[i].flags & PTA_SSE
	    && !(target_flags_explicit & MASK_SSE))
	  target_flags |= MASK_SSE;
	if (processor_alias_table[i].flags & PTA_SSE2
	    && !(target_flags_explicit & MASK_SSE2))
	  target_flags |= MASK_SSE2;
	if (processor_alias_table[i].flags & PTA_SSE3
	    && !(target_flags_explicit & MASK_SSE3))
	  target_flags |= MASK_SSE3;
	if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
	  x86_prefetch_sse = true;
	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
	  error ("CPU you selected does not support x86-64 "
		 "instruction set");
	break;
      }

  if (i == pta_size)
    error ("bad value (%s) for -march= switch", ix86_arch_string);
  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
      {
	ix86_tune = processor_alias_table[i].processor;
	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
	  {
	    if (ix86_tune_defaulted)
	      {
		ix86_tune_string = "x86-64";
		for (i = 0; i < pta_size; i++)
		  if (! strcmp (ix86_tune_string,
				processor_alias_table[i].name))
		    break;
		ix86_tune = processor_alias_table[i].processor;
	      }
	    else
	      error ("CPU you selected does not support x86-64 "
		     "instruction set");
	  }
	/* Intel CPUs have always interpreted SSE prefetch instructions as
	   NOPs; so, we can enable SSE prefetch instructions even when
	   -mtune (rather than -march) points us to a processor that has them.
	   However, the VIA C3 gives a SIGILL, so we only do that for i686 and
	   higher processors.  */
	if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
	  x86_prefetch_sse = true;
	break;
      }
  if (i == pta_size)
    error ("bad value (%s) for -mtune= switch", ix86_tune_string);
  if (optimize_size)
    ix86_cost = &size_cost;
  else
    ix86_cost = processor_target_table[ix86_tune].cost;
  target_flags |= processor_target_table[ix86_tune].target_enable;
  target_flags &= ~processor_target_table[ix86_tune].target_disable;
  /* Arrange to set up i386_stack_locals for all functions.  */
  init_machine_status = ix86_init_machine_status;

  /* Validate -mregparm= value.  */
  if (ix86_regparm_string)
    {
      i = atoi (ix86_regparm_string);
      if (i < 0 || i > REGPARM_MAX)
	error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
      else
	ix86_regparm = i;
    }
  if (TARGET_64BIT)
    ix86_regparm = REGPARM_MAX;
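
  /* Example: -mregparm=3 makes the first three integer arguments travel
     in registers (%eax, %edx, %ecx on IA-32) instead of on the stack;
     REGPARM_MAX is 3 for 32-bit code and 6 for 64-bit code in i386.h.  */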
  /* If the user has provided any of the -malign-* options,
     warn and use that value only if -falign-* is not set.
     Remove this code in GCC 3.2 or later.  */
  if (ix86_align_loops_string)
    {
      warning (0, "-malign-loops is obsolete, use -falign-loops");
      if (align_loops == 0)
	{
	  i = atoi (ix86_align_loops_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_loops = 1 << i;
	}
    }

  if (ix86_align_jumps_string)
    {
      warning (0, "-malign-jumps is obsolete, use -falign-jumps");
      if (align_jumps == 0)
	{
	  i = atoi (ix86_align_jumps_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_jumps = 1 << i;
	}
    }

  if (ix86_align_funcs_string)
    {
      warning (0, "-malign-functions is obsolete, use -falign-functions");
      if (align_functions == 0)
	{
	  i = atoi (ix86_align_funcs_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_functions = 1 << i;
	}
    }
  /* Default align_* from the processor table.  */
  if (align_loops == 0)
    {
      align_loops = processor_target_table[ix86_tune].align_loop;
      align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
    }
  if (align_jumps == 0)
    {
      align_jumps = processor_target_table[ix86_tune].align_jump;
      align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
    }
  if (align_functions == 0)
    {
      align_functions = processor_target_table[ix86_tune].align_func;
    }
  /* Validate -mpreferred-stack-boundary= value, or provide default.
     The default of 128 bits is for Pentium III's SSE __m128, but we
     don't want additional code to keep the stack aligned when
     optimizing for code size.  */
  ix86_preferred_stack_boundary = (optimize_size
				   ? TARGET_64BIT ? 128 : 32
				   : 128);
  if (ix86_preferred_stack_boundary_string)
    {
      i = atoi (ix86_preferred_stack_boundary_string);
      if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
	error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
	       TARGET_64BIT ? 4 : 2);
      else
	ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
    }
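
  /* Worked example: -mpreferred-stack-boundary=4 yields (1 << 4) * 8
     = 128 bits, i.e. the 16-byte alignment that SSE __m128 spills want
     (BITS_PER_UNIT is 8).  */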
  /* Validate -mbranch-cost= value, or provide default.  */
  ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
  if (ix86_branch_cost_string)
    {
      i = atoi (ix86_branch_cost_string);
      if (i < 0 || i > 5)
	error ("-mbranch-cost=%d is not between 0 and 5", i);
      else
	ix86_branch_cost = i;
    }
  if (ix86_tls_dialect_string)
    {
      if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
	ix86_tls_dialect = TLS_DIALECT_GNU;
      else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
	ix86_tls_dialect = TLS_DIALECT_SUN;
      else
	error ("bad value (%s) for -mtls-dialect= switch",
	       ix86_tls_dialect_string);
    }
  /* Keep nonleaf frame pointers.  */
  if (flag_omit_frame_pointer)
    target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
  else if (TARGET_OMIT_LEAF_FRAME_POINTER)
    flag_omit_frame_pointer = 1;

  /* If we're doing fast math, we don't care about comparison order
     wrt NaNs.  This lets us use a shorter comparison sequence.  */
  if (flag_unsafe_math_optimizations)
    target_flags &= ~MASK_IEEE_FP;
  /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
     since the insns won't need emulation.  */
  if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
    target_flags &= ~MASK_NO_FANCY_MATH_387;

  /* Likewise, if the target doesn't have a 387, or we've specified
     software floating point, don't use 387 inline intrinsics.  */
  if (!TARGET_80387)
    target_flags |= MASK_NO_FANCY_MATH_387;

  /* Turn on SSE2 builtins for -msse3.  */
  if (TARGET_SSE3)
    target_flags |= MASK_SSE2;

  /* Turn on SSE builtins for -msse2.  */
  if (TARGET_SSE2)
    target_flags |= MASK_SSE;

  /* Turn on MMX builtins for -msse.  */
  if (TARGET_SSE)
    {
      target_flags |= MASK_MMX & ~target_flags_explicit;
      x86_prefetch_sse = true;
    }

  /* Turn on MMX builtins for 3Dnow.  */
  if (TARGET_3DNOW)
    target_flags |= MASK_MMX;
  if (TARGET_64BIT)
    {
      if (TARGET_ALIGN_DOUBLE)
	error ("-malign-double makes no sense in the 64bit mode");
      if (TARGET_RTD)
	error ("-mrtd calling convention not supported in the 64bit mode");

      /* Enable by default the SSE and MMX builtins.  Do allow the user to
	 explicitly disable any of these.  In particular, disabling SSE and
	 MMX for kernel code is extremely useful.  */
      target_flags
	|= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
	    & ~target_flags_explicit);
    }
  else
    {
      /* The i386 ABI does not specify a red zone.  It still makes sense
	 to use it when the programmer takes care to keep the stack from
	 being destroyed.  */
      if (!(target_flags_explicit & MASK_NO_RED_ZONE))
	target_flags |= MASK_NO_RED_ZONE;
    }
  ix86_fpmath = TARGET_FPMATH_DEFAULT;

  if (ix86_fpmath_string != 0)
    {
      if (! strcmp (ix86_fpmath_string, "387"))
	ix86_fpmath = FPMATH_387;
      else if (! strcmp (ix86_fpmath_string, "sse"))
	{
	  if (!TARGET_SSE)
	    {
	      warning (0, "SSE instruction set disabled, using 387 arithmetics");
	      ix86_fpmath = FPMATH_387;
	    }
	  else
	    ix86_fpmath = FPMATH_SSE;
	}
      else if (! strcmp (ix86_fpmath_string, "387,sse")
	       || ! strcmp (ix86_fpmath_string, "sse,387"))
	{
	  if (!TARGET_SSE)
	    {
	      warning (0, "SSE instruction set disabled, using 387 arithmetics");
	      ix86_fpmath = FPMATH_387;
	    }
	  else if (!TARGET_80387)
	    {
	      warning (0, "387 instruction set disabled, using SSE arithmetics");
	      ix86_fpmath = FPMATH_SSE;
	    }
	  else
	    ix86_fpmath = FPMATH_SSE | FPMATH_387;
	}
      else
	error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
    }
  /* If the i387 is disabled, then do not return values in it.  */
  if (!TARGET_80387)
    target_flags &= ~MASK_FLOAT_RETURNS;

  if ((x86_accumulate_outgoing_args & TUNEMASK)
      && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
      && !optimize_size)
    target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;

  /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix.  */
  {
    char *p;
    ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
    p = strchr (internal_label_prefix, 'X');
    internal_label_prefix_len = p - internal_label_prefix;
    *p = '\0';
  }
  /* When scheduling description is not available, disable scheduler pass
     so it won't slow down the compilation and make x87 code slower.  */
  if (!TARGET_SCHEDULE)
    flag_schedule_insns_after_reload = flag_schedule_insns = 0;
}
void
optimization_options (int level, int size ATTRIBUTE_UNUSED)
{
  /* For -O2 and beyond, turn off -fschedule-insns by default.  It tends to
     make the problem with not enough registers even worse.  */
#ifdef INSN_SCHEDULING
  if (level > 1)
    flag_schedule_insns = 0;
#endif

  if (TARGET_MACHO)
    /* The Darwin libraries never set errno, so we might as well
       avoid calling them when that's the only reason we would.  */
    flag_errno_math = 0;

  /* The default values of these switches depend on TARGET_64BIT,
     which is not known at this moment.  Mark these values with 2 and
     let the user override them.  In case there is no command line
     option specifying them, we will set the defaults in
     override_options.  */
  if (optimize >= 1)
    flag_omit_frame_pointer = 2;
  flag_pcc_struct_return = 2;
  flag_asynchronous_unwind_tables = 2;
#ifdef SUBTARGET_OPTIMIZATION_OPTIONS
  SUBTARGET_OPTIMIZATION_OPTIONS;
#endif
}
1656 /* Table of valid machine attributes. */
1657 const struct attribute_spec ix86_attribute_table[] =
1659 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1660 /* Stdcall attribute says callee is responsible for popping arguments
1661 if they are not variable. */
1662 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1663 /* Fastcall attribute says callee is responsible for popping arguments
1664 if they are not variable. */
1665 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1666 /* Cdecl attribute says the callee is a normal C declaration */
1667 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1668 /* Regparm attribute specifies how many integer arguments are to be
1669 passed in registers. */
1670 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
1671 /* Sseregparm attribute says we are using x86_64 calling conventions
1672 for FP arguments. */
1673 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1674 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1675 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1676 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1677 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1679 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1680 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1681 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1682 SUBTARGET_ATTRIBUTE_TABLE,
1684 { NULL, 0, 0, false, false, false, NULL }
1687 /* Decide whether we can make a sibling call to a function. DECL is the
1688 declaration of the function being targeted by the call and EXP is the
1689 CALL_EXPR representing the call. */
1692 ix86_function_ok_for_sibcall (tree decl, tree exp)
1696 /* If we are generating position-independent code, we cannot sibcall
1697 optimize any indirect call, or a direct call to a global function,
1698 as the PLT requires %ebx be live. */
1699 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1707 /* If we are returning floats on the 80387 register stack, we cannot
1708 make a sibcall from a function that doesn't return a float to a
1709 function that does or, conversely, from a function that does return
1710 a float to a function that doesn't; the necessary stack adjustment
1711 would not be executed. */
1712 if (STACK_REG_P (ix86_function_value (TREE_TYPE (exp), func))
1713 != STACK_REG_P (ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
1717 /* If this call is indirect, we'll need to be able to use a call-clobbered
1718 register for the address of the target function. Make sure that all
1719 such registers are not used for passing parameters. */
1720 if (!decl && !TARGET_64BIT)
1724 /* We're looking at the CALL_EXPR, we need the type of the function. */
1725 type = TREE_OPERAND (exp, 0); /* pointer expression */
1726 type = TREE_TYPE (type); /* pointer type */
1727 type = TREE_TYPE (type); /* function type */
1729 if (ix86_function_regparm (type, NULL) >= 3)
1731 /* ??? Need to count the actual number of registers to be used,
1732 not the possible number of registers. Fix later. */
1737 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1738 /* Dllimport'd functions are also called indirectly. */
1739 if (decl && lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl))
1740 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
1744 /* Otherwise okay. That also includes certain types of indirect calls. */
1748 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
1749 calling convention attributes;
1750 arguments as in struct attribute_spec.handler. */
1753 ix86_handle_cconv_attribute (tree *node, tree name,
1755 int flags ATTRIBUTE_UNUSED,
1758 if (TREE_CODE (*node) != FUNCTION_TYPE
1759 && TREE_CODE (*node) != METHOD_TYPE
1760 && TREE_CODE (*node) != FIELD_DECL
1761 && TREE_CODE (*node) != TYPE_DECL)
1763 warning (OPT_Wattributes, "%qs attribute only applies to functions",
1764 IDENTIFIER_POINTER (name));
1765 *no_add_attrs = true;
1769 /* Can combine regparm with all attributes but fastcall. */
1770 if (is_attribute_p ("regparm", name))
1774 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1776 error ("fastcall and regparm attributes are not compatible");
1779 cst = TREE_VALUE (args);
1780 if (TREE_CODE (cst) != INTEGER_CST)
1782 warning (OPT_Wattributes,
1783 "%qs attribute requires an integer constant argument",
1784 IDENTIFIER_POINTER (name));
1785 *no_add_attrs = true;
1787 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
1789 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
1790 IDENTIFIER_POINTER (name), REGPARM_MAX);
1791 *no_add_attrs = true;
1799 warning (OPT_Wattributes, "%qs attribute ignored",
1800 IDENTIFIER_POINTER (name));
1801 *no_add_attrs = true;
1805 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
1806 if (is_attribute_p ("fastcall", name))
1808 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
1810 error ("fastcall and cdecl attributes are not compatible");
1812 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
1814 error ("fastcall and stdcall attributes are not compatible");
1816 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
1818 error ("fastcall and regparm attributes are not compatible");
1822 /* Can combine stdcall with fastcall (redundant), regparm and
1824 else if (is_attribute_p ("stdcall", name))
1826 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
1828 error ("stdcall and cdecl attributes are not compatible");
1830 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1832 error ("stdcall and fastcall attributes are not compatible");
1836 /* Can combine cdecl with regparm and sseregparm. */
1837 else if (is_attribute_p ("cdecl", name))
1839 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
1841 error ("stdcall and cdecl attributes are not compatible");
1843 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1845 error ("fastcall and cdecl attributes are not compatible");
1849 /* Can combine sseregparm with all attributes. */
1854 /* Return 0 if the attributes for two types are incompatible, 1 if they
1855 are compatible, and 2 if they are nearly compatible (which causes a
1856 warning to be generated). */
1859 ix86_comp_type_attributes (tree type1, tree type2)
1861 /* Check for mismatch of non-default calling convention. */
1862 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
1864 if (TREE_CODE (type1) != FUNCTION_TYPE)
1867 /* Check for mismatched fastcall/regparm types. */
1868 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
1869 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
1870 || (ix86_function_regparm (type1, NULL)
1871 != ix86_function_regparm (type2, NULL)))
1874 /* Check for mismatched sseregparm types. */
1875 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
1876 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
1879 /* Check for mismatched return types (cdecl vs stdcall). */
1880 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
1881 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
1887 /* Return the regparm value for a function with the indicated TYPE and DECL.
1888 DECL may be NULL when calling function indirectly
1889 or considering a libcall. */
1892 ix86_function_regparm (tree type, tree decl)
1895 int regparm = ix86_regparm;
1896 bool user_convention = false;
1900 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
1903 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
1904 user_convention = true;
1907 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
1910 user_convention = true;
1913 /* Use register calling convention for local functions when possible. */
1914 if (!TARGET_64BIT && !user_convention && decl
1915 && flag_unit_at_a_time && !profile_flag)
1917 struct cgraph_local_info *i = cgraph_local_info (decl);
1920 /* We can't use regparm(3) for nested functions as these use
1921 static chain pointer in third argument. */
1922 if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
1932 /* Return 1 or 2, if we can pass up to 8 SFmode (1) and DFmode (2) arguments
1933 in SSE registers for a function with the indicated TYPE and DECL.
1934 DECL may be NULL when calling function indirectly
1935 or considering a libcall. Otherwise return 0. */
1938 ix86_function_sseregparm (tree type, tree decl)
1940 /* Use SSE registers to pass SFmode and DFmode arguments if requested
1941 by the sseregparm attribute. */
1943 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type)))
1948 error ("Calling %qD with attribute sseregparm without "
1949 "SSE/SSE2 enabled", decl);
1951 error ("Calling %qT with attribute sseregparm without "
1952 "SSE/SSE2 enabled", type);
1959 /* For local functions, pass SFmode (and DFmode for SSE2) arguments
1960 in SSE registers even for 32-bit mode and not just 3, but up to
1961 8 SSE arguments in registers. */
1962 if (!TARGET_64BIT && decl
1963 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
1965 struct cgraph_local_info *i = cgraph_local_info (decl);
1967 return TARGET_SSE2 ? 2 : 1;
1973 /* Return true if EAX is live at the start of the function. Used by
1974 ix86_expand_prologue to determine if we need special help before
1975 calling allocate_stack_worker. */
1978 ix86_eax_live_at_start_p (void)
1980 /* Cheat. Don't bother working forward from ix86_function_regparm
1981 to the function type to whether an actual argument is located in
1982 eax. Instead just look at cfg info, which is still close enough
1983 to correct at this point. This gives false positives for broken
1984 functions that might use uninitialized data that happens to be
1985 allocated in eax, but who cares? */
1986 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
1989 /* Value is the number of bytes of arguments automatically
1990 popped when returning from a subroutine call.
1991 FUNDECL is the declaration node of the function (as a tree),
1992 FUNTYPE is the data type of the function (as a tree),
1993 or for a library call it is an identifier node for the subroutine name.
1994 SIZE is the number of bytes of arguments passed on the stack.
1996 On the 80386, the RTD insn may be used to pop them if the number
1997 of args is fixed, but if the number is variable then the caller
1998 must pop them all. RTD can't be used for library calls now
1999 because the library is compiled with the Unix compiler.
2000 Use of RTD is a selectable option, since it is incompatible with
2001 standard Unix calling sequences. If the option is not selected,
2002 the caller must always pop the args.
2004 The attribute stdcall is equivalent to RTD on a per module basis. */
2007 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2009 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2011 /* Cdecl functions override -mrtd, and never pop the stack. */
2012 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2014 /* Stdcall and fastcall functions will pop the stack if not
2016 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2017 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2021 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2022 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2023 == void_type_node)))
2027 /* Lose any fake structure return argument if it is passed on the stack. */
2028 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2030 && !KEEP_AGGREGATE_RETURN_POINTER)
2032 int nregs = ix86_function_regparm (funtype, fundecl);
2035 return GET_MODE_SIZE (Pmode);
2041 /* Argument support functions. */
2043 /* Return true when register may be used to pass function parameters. */
2045 ix86_function_arg_regno_p (int regno)
2049 return (regno < REGPARM_MAX
2050 || (TARGET_MMX && MMX_REGNO_P (regno)
2051 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2052 || (TARGET_SSE && SSE_REGNO_P (regno)
2053 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2055 if (TARGET_SSE && SSE_REGNO_P (regno)
2056 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2058 /* RAX is used as hidden argument to va_arg functions. */
2061 for (i = 0; i < REGPARM_MAX; i++)
2062 if (regno == x86_64_int_parameter_registers[i])
2067 /* Return if we do not know how to pass TYPE solely in registers. */
2070 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2072 if (must_pass_in_stack_var_size_or_pad (mode, type))
2075 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2076 The layout_type routine is crafty and tries to trick us into passing
2077 currently unsupported vector types on the stack by using TImode. */
2078 return (!TARGET_64BIT && mode == TImode
2079 && type && TREE_CODE (type) != VECTOR_TYPE);
2082 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2083 for a call to a function whose data type is FNTYPE.
2084 For a library call, FNTYPE is 0. */
2087 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2088 tree fntype, /* tree ptr for function decl */
2089 rtx libname, /* SYMBOL_REF of library name or 0 */
2092 static CUMULATIVE_ARGS zero_cum;
2093 tree param, next_param;
2095 if (TARGET_DEBUG_ARG)
2097 fprintf (stderr, "\ninit_cumulative_args (");
2099 fprintf (stderr, "fntype code = %s, ret code = %s",
2100 tree_code_name[(int) TREE_CODE (fntype)],
2101 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2103 fprintf (stderr, "no fntype");
2106 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2111 /* Set up the number of registers to use for passing arguments. */
2112 cum->nregs = ix86_regparm;
2114 cum->sse_nregs = SSE_REGPARM_MAX;
2116 cum->mmx_nregs = MMX_REGPARM_MAX;
2117 cum->warn_sse = true;
2118 cum->warn_mmx = true;
2119 cum->maybe_vaarg = false;
2121 /* Use ecx and edx registers if function has fastcall attribute,
2122 else look for regparm information. */
2123 if (fntype && !TARGET_64BIT)
2125 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2131 cum->nregs = ix86_function_regparm (fntype, fndecl);
2134 /* Set up the number of SSE registers used for passing SFmode
2135 and DFmode arguments. Warn for mismatching ABI. */
2136 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2138 /* Determine if this function has variable arguments. This is
2139 indicated by the last argument being 'void_type_mode' if there
2140 are no variable arguments. If there are variable arguments, then
2141 we won't pass anything in registers in 32-bit mode. */
2143 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2145 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2146 param != 0; param = next_param)
2148 next_param = TREE_CHAIN (param);
2149 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2159 cum->float_in_sse = 0;
2161 cum->maybe_vaarg = true;
2165 if ((!fntype && !libname)
2166 || (fntype && !TYPE_ARG_TYPES (fntype)))
2167 cum->maybe_vaarg = true;
2169 if (TARGET_DEBUG_ARG)
2170 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2175 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
2176 But in the case of vector types, it is some vector mode.
2178 When we have only some of our vector isa extensions enabled, then there
2179 are some modes for which vector_mode_supported_p is false. For these
2180 modes, the generic vector support in gcc will choose some non-vector mode
2181 in order to implement the type. By computing the natural mode, we'll
2182 select the proper ABI location for the operand and not depend on whatever
2183 the middle-end decides to do with these vector types. */
2185 static enum machine_mode
2186 type_natural_mode (tree type)
2188 enum machine_mode mode = TYPE_MODE (type);
2190 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
2192 HOST_WIDE_INT size = int_size_in_bytes (type);
2193 if ((size == 8 || size == 16)
2194 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
2195 && TYPE_VECTOR_SUBPARTS (type) > 1)
2197 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2199 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
2200 mode = MIN_MODE_VECTOR_FLOAT;
2202 mode = MIN_MODE_VECTOR_INT;
2204 /* Get the mode which has this inner mode and number of units. */
2205 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
2206 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
2207 && GET_MODE_INNER (mode) == innermode)
2217 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
2218 this may not agree with the mode that the type system has chosen for the
2219 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
2220 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
2223 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
2228 if (orig_mode != BLKmode)
2229 tmp = gen_rtx_REG (orig_mode, regno);
2232 tmp = gen_rtx_REG (mode, regno);
2233 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
2234 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
2240 /* x86-64 register passing implementation. See x86-64 ABI for details. Goal
2241 of this code is to classify each 8bytes of incoming argument by the register
2242 class and assign registers accordingly. */
2244 /* Return the union class of CLASS1 and CLASS2.
2245 See the x86-64 PS ABI for details. */
2247 static enum x86_64_reg_class
2248 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2250 /* Rule #1: If both classes are equal, this is the resulting class. */
2251 if (class1 == class2)
2254 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2256 if (class1 == X86_64_NO_CLASS)
2258 if (class2 == X86_64_NO_CLASS)
2261 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2262 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2263 return X86_64_MEMORY_CLASS;
2265 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2266 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2267 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2268 return X86_64_INTEGERSI_CLASS;
2269 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2270 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2271 return X86_64_INTEGER_CLASS;
2273 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
2275 if (class1 == X86_64_X87_CLASS
2276 || class1 == X86_64_X87UP_CLASS
2277 || class1 == X86_64_COMPLEX_X87_CLASS
2278 || class2 == X86_64_X87_CLASS
2279 || class2 == X86_64_X87UP_CLASS
2280 || class2 == X86_64_COMPLEX_X87_CLASS)
2281 return X86_64_MEMORY_CLASS;
2283 /* Rule #6: Otherwise class SSE is used. */
2284 return X86_64_SSE_CLASS;
2287 /* Classify the argument of type TYPE and mode MODE.
2288 CLASSES will be filled by the register class used to pass each word
2289 of the operand. The number of words is returned. In case the parameter
2290 should be passed in memory, 0 is returned. As a special case for zero
2291 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2293 BIT_OFFSET is used internally for handling records and specifies offset
2294 of the offset in bits modulo 256 to avoid overflow cases.
2296 See the x86-64 PS ABI for details.
2300 classify_argument (enum machine_mode mode, tree type,
2301 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2303 HOST_WIDE_INT bytes =
2304 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2305 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2307 /* Variable sized entities are always passed/returned in memory. */
2311 if (mode != VOIDmode
2312 && targetm.calls.must_pass_in_stack (mode, type))
2315 if (type && AGGREGATE_TYPE_P (type))
2319 enum x86_64_reg_class subclasses[MAX_CLASSES];
2321 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2325 for (i = 0; i < words; i++)
2326 classes[i] = X86_64_NO_CLASS;
2328 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
2329 signalize memory class, so handle it as special case. */
2332 classes[0] = X86_64_NO_CLASS;
2336 /* Classify each field of record and merge classes. */
2337 switch (TREE_CODE (type))
2340 /* For classes first merge in the field of the subclasses. */
2341 if (TYPE_BINFO (type))
2343 tree binfo, base_binfo;
2346 for (binfo = TYPE_BINFO (type), basenum = 0;
2347 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2350 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2351 tree type = BINFO_TYPE (base_binfo);
2353 num = classify_argument (TYPE_MODE (type),
2355 (offset + bit_offset) % 256);
2358 for (i = 0; i < num; i++)
2360 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2362 merge_classes (subclasses[i], classes[i + pos]);
2366 /* And now merge the fields of structure. */
2367 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2369 if (TREE_CODE (field) == FIELD_DECL)
2373 /* Bitfields are always classified as integer. Handle them
2374 early, since later code would consider them to be
2375 misaligned integers. */
2376 if (DECL_BIT_FIELD (field))
2378 for (i = int_bit_position (field) / 8 / 8;
2379 i < (int_bit_position (field)
2380 + tree_low_cst (DECL_SIZE (field), 0)
2383 merge_classes (X86_64_INTEGER_CLASS,
2388 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2389 TREE_TYPE (field), subclasses,
2390 (int_bit_position (field)
2391 + bit_offset) % 256);
2394 for (i = 0; i < num; i++)
2397 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2399 merge_classes (subclasses[i], classes[i + pos]);
2407 /* Arrays are handled as small records. */
2410 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2411 TREE_TYPE (type), subclasses, bit_offset);
2415 /* The partial classes are now full classes. */
2416 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2417 subclasses[0] = X86_64_SSE_CLASS;
2418 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2419 subclasses[0] = X86_64_INTEGER_CLASS;
2421 for (i = 0; i < words; i++)
2422 classes[i] = subclasses[i % num];
2427 case QUAL_UNION_TYPE:
2428 /* Unions are similar to RECORD_TYPE but offset is always 0.
2431 /* Unions are not derived. */
2432 gcc_assert (!TYPE_BINFO (type)
2433 || !BINFO_N_BASE_BINFOS (TYPE_BINFO (type)));
2434 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2436 if (TREE_CODE (field) == FIELD_DECL)
2439 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2440 TREE_TYPE (field), subclasses,
2444 for (i = 0; i < num; i++)
2445 classes[i] = merge_classes (subclasses[i], classes[i]);
2454 /* Final merger cleanup. */
2455 for (i = 0; i < words; i++)
2457 /* If one class is MEMORY, everything should be passed in
2459 if (classes[i] == X86_64_MEMORY_CLASS)
2462 /* The X86_64_SSEUP_CLASS should be always preceded by
2463 X86_64_SSE_CLASS. */
2464 if (classes[i] == X86_64_SSEUP_CLASS
2465 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2466 classes[i] = X86_64_SSE_CLASS;
2468 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2469 if (classes[i] == X86_64_X87UP_CLASS
2470 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2471 classes[i] = X86_64_SSE_CLASS;
2476 /* Compute alignment needed. We align all types to natural boundaries with
2477 exception of XFmode that is aligned to 64bits. */
2478 if (mode != VOIDmode && mode != BLKmode)
2480 int mode_alignment = GET_MODE_BITSIZE (mode);
2483 mode_alignment = 128;
2484 else if (mode == XCmode)
2485 mode_alignment = 256;
2486 if (COMPLEX_MODE_P (mode))
2487 mode_alignment /= 2;
2488 /* Misaligned fields are always returned in memory. */
2489 if (bit_offset % mode_alignment)
2493 /* for V1xx modes, just use the base mode */
2494 if (VECTOR_MODE_P (mode)
2495 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2496 mode = GET_MODE_INNER (mode);
2498 /* Classification of atomic types. */
2508 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2509 classes[0] = X86_64_INTEGERSI_CLASS;
2511 classes[0] = X86_64_INTEGER_CLASS;
2515 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2520 if (!(bit_offset % 64))
2521 classes[0] = X86_64_SSESF_CLASS;
2523 classes[0] = X86_64_SSE_CLASS;
2526 classes[0] = X86_64_SSEDF_CLASS;
2529 classes[0] = X86_64_X87_CLASS;
2530 classes[1] = X86_64_X87UP_CLASS;
2533 classes[0] = X86_64_SSE_CLASS;
2534 classes[1] = X86_64_SSEUP_CLASS;
2537 classes[0] = X86_64_SSE_CLASS;
2540 classes[0] = X86_64_SSEDF_CLASS;
2541 classes[1] = X86_64_SSEDF_CLASS;
2544 classes[0] = X86_64_COMPLEX_X87_CLASS;
2547 /* This modes is larger than 16 bytes. */
2555 classes[0] = X86_64_SSE_CLASS;
2556 classes[1] = X86_64_SSEUP_CLASS;
2562 classes[0] = X86_64_SSE_CLASS;
2568 gcc_assert (VECTOR_MODE_P (mode));
2573 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
2575 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2576 classes[0] = X86_64_INTEGERSI_CLASS;
2578 classes[0] = X86_64_INTEGER_CLASS;
2579 classes[1] = X86_64_INTEGER_CLASS;
2580 return 1 + (bytes > 8);
2584 /* Examine the argument and return set number of register required in each
2585 class. Return 0 iff parameter should be passed in memory. */
2587 examine_argument (enum machine_mode mode, tree type, int in_return,
2588 int *int_nregs, int *sse_nregs)
2590 enum x86_64_reg_class class[MAX_CLASSES];
2591 int n = classify_argument (mode, type, class, 0);
2597 for (n--; n >= 0; n--)
2600 case X86_64_INTEGER_CLASS:
2601 case X86_64_INTEGERSI_CLASS:
2604 case X86_64_SSE_CLASS:
2605 case X86_64_SSESF_CLASS:
2606 case X86_64_SSEDF_CLASS:
2609 case X86_64_NO_CLASS:
2610 case X86_64_SSEUP_CLASS:
2612 case X86_64_X87_CLASS:
2613 case X86_64_X87UP_CLASS:
2617 case X86_64_COMPLEX_X87_CLASS:
2618 return in_return ? 2 : 0;
2619 case X86_64_MEMORY_CLASS:
2625 /* Construct container for the argument used by GCC interface. See
2626 FUNCTION_ARG for the detailed description. */
2629 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
2630 tree type, int in_return, int nintregs, int nsseregs,
2631 const int *intreg, int sse_regno)
2633 enum machine_mode tmpmode;
2635 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2636 enum x86_64_reg_class class[MAX_CLASSES];
2640 int needed_sseregs, needed_intregs;
2641 rtx exp[MAX_CLASSES];
2644 n = classify_argument (mode, type, class, 0);
2645 if (TARGET_DEBUG_ARG)
2648 fprintf (stderr, "Memory class\n");
2651 fprintf (stderr, "Classes:");
2652 for (i = 0; i < n; i++)
2654 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2656 fprintf (stderr, "\n");
2661 if (!examine_argument (mode, type, in_return, &needed_intregs,
2664 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2667 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2668 some less clueful developer tries to use floating-point anyway. */
2669 if (needed_sseregs && !TARGET_SSE)
2671 static bool issued_error;
2674 issued_error = true;
2676 error ("SSE register return with SSE disabled");
2678 error ("SSE register argument with SSE disabled");
2683 /* First construct simple cases. Avoid SCmode, since we want to use
2684 single register to pass this type. */
2685 if (n == 1 && mode != SCmode)
2688 case X86_64_INTEGER_CLASS:
2689 case X86_64_INTEGERSI_CLASS:
2690 return gen_rtx_REG (mode, intreg[0]);
2691 case X86_64_SSE_CLASS:
2692 case X86_64_SSESF_CLASS:
2693 case X86_64_SSEDF_CLASS:
2694 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
2695 case X86_64_X87_CLASS:
2696 case X86_64_COMPLEX_X87_CLASS:
2697 return gen_rtx_REG (mode, FIRST_STACK_REG);
2698 case X86_64_NO_CLASS:
2699 /* Zero sized array, struct or class. */
2704 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2706 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2708 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2709 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2710 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2711 && class[1] == X86_64_INTEGER_CLASS
2712 && (mode == CDImode || mode == TImode || mode == TFmode)
2713 && intreg[0] + 1 == intreg[1])
2714 return gen_rtx_REG (mode, intreg[0]);
2716 /* Otherwise figure out the entries of the PARALLEL. */
2717 for (i = 0; i < n; i++)
2721 case X86_64_NO_CLASS:
2723 case X86_64_INTEGER_CLASS:
2724 case X86_64_INTEGERSI_CLASS:
2725 /* Merge TImodes on aligned occasions here too. */
2726 if (i * 8 + 8 > bytes)
2727 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
2728 else if (class[i] == X86_64_INTEGERSI_CLASS)
2732 /* We've requested 24 bytes we don't have mode for. Use DImode. */
2733 if (tmpmode == BLKmode)
2735 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2736 gen_rtx_REG (tmpmode, *intreg),
2740 case X86_64_SSESF_CLASS:
2741 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2742 gen_rtx_REG (SFmode,
2743 SSE_REGNO (sse_regno)),
2747 case X86_64_SSEDF_CLASS:
2748 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2749 gen_rtx_REG (DFmode,
2750 SSE_REGNO (sse_regno)),
2754 case X86_64_SSE_CLASS:
2755 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
2759 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2760 gen_rtx_REG (tmpmode,
2761 SSE_REGNO (sse_regno)),
2763 if (tmpmode == TImode)
2772 /* Empty aligned struct, union or class. */
2776 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
2777 for (i = 0; i < nexps; i++)
2778 XVECEXP (ret, 0, i) = exp [i];
2782 /* Update the data in CUM to advance over an argument
2783 of mode MODE and data type TYPE.
2784 (TYPE is null for libcalls where that information may not be available.) */
2787 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2788 tree type, int named)
2791 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2792 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2795 mode = type_natural_mode (type);
2797 if (TARGET_DEBUG_ARG)
2798 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
2799 "mode=%s, named=%d)\n\n",
2800 words, cum->words, cum->nregs, cum->sse_nregs,
2801 GET_MODE_NAME (mode), named);
2805 int int_nregs, sse_nregs;
2806 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
2807 cum->words += words;
2808 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
2810 cum->nregs -= int_nregs;
2811 cum->sse_nregs -= sse_nregs;
2812 cum->regno += int_nregs;
2813 cum->sse_regno += sse_nregs;
2816 cum->words += words;
2834 cum->words += words;
2835 cum->nregs -= words;
2836 cum->regno += words;
2838 if (cum->nregs <= 0)
2846 if (cum->float_in_sse < 2)
2849 if (cum->float_in_sse < 1)
2860 if (!type || !AGGREGATE_TYPE_P (type))
2862 cum->sse_words += words;
2863 cum->sse_nregs -= 1;
2864 cum->sse_regno += 1;
2865 if (cum->sse_nregs <= 0)
2877 if (!type || !AGGREGATE_TYPE_P (type))
2879 cum->mmx_words += words;
2880 cum->mmx_nregs -= 1;
2881 cum->mmx_regno += 1;
2882 if (cum->mmx_nregs <= 0)
2893 /* Define where to put the arguments to a function.
2894 Value is zero to push the argument on the stack,
2895 or a hard register in which to store the argument.
2897 MODE is the argument's machine mode.
2898 TYPE is the data type of the argument (as a tree).
2899 This is null for libcalls where that information may
2901 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2902 the preceding args and about the function being called.
2903 NAMED is nonzero if this argument is a named parameter
2904 (otherwise it is an extra parameter matching an ellipsis). */
2907 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
2908 tree type, int named)
2910 enum machine_mode mode = orig_mode;
2913 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2914 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2915 static bool warnedsse, warnedmmx;
2917 /* To simplify the code below, represent vector types with a vector mode
2918 even if MMX/SSE are not active. */
2919 if (type && TREE_CODE (type) == VECTOR_TYPE)
2920 mode = type_natural_mode (type);
2922 /* Handle a hidden AL argument containing number of registers for varargs
2923 x86-64 functions. For i386 ABI just return constm1_rtx to avoid
2925 if (mode == VOIDmode)
2928 return GEN_INT (cum->maybe_vaarg
2929 ? (cum->sse_nregs < 0
2937 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
2939 &x86_64_int_parameter_registers [cum->regno],
2944 /* For now, pass fp/complex values on the stack. */
2956 if (words <= cum->nregs)
2958 int regno = cum->regno;
2960 /* Fastcall allocates the first two DWORD (SImode) or
2961 smaller arguments to ECX and EDX. */
2964 if (mode == BLKmode || mode == DImode)
2967 /* ECX not EAX is the first allocated register. */
2971 ret = gen_rtx_REG (mode, regno);
2975 if (cum->float_in_sse < 2)
2978 if (cum->float_in_sse < 1)
2988 if (!type || !AGGREGATE_TYPE_P (type))
2990 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
2993 warning (0, "SSE vector argument without SSE enabled "
2997 ret = gen_reg_or_parallel (mode, orig_mode,
2998 cum->sse_regno + FIRST_SSE_REG);
3005 if (!type || !AGGREGATE_TYPE_P (type))
3007 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3010 warning (0, "MMX vector argument without MMX enabled "
3014 ret = gen_reg_or_parallel (mode, orig_mode,
3015 cum->mmx_regno + FIRST_MMX_REG);
3020 if (TARGET_DEBUG_ARG)
3023 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3024 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3027 print_simple_rtl (stderr, ret);
3029 fprintf (stderr, ", stack");
3031 fprintf (stderr, " )\n");
3037 /* A C expression that indicates when an argument must be passed by
3038 reference. If nonzero for an argument, a copy of that argument is
3039 made in memory and a pointer to the argument is passed instead of
3040 the argument itself. The pointer is passed in whatever way is
3041 appropriate for passing a pointer to that type. */
3044 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3045 enum machine_mode mode ATTRIBUTE_UNUSED,
3046 tree type, bool named ATTRIBUTE_UNUSED)
3051 if (type && int_size_in_bytes (type) == -1)
3053 if (TARGET_DEBUG_ARG)
3054 fprintf (stderr, "function_arg_pass_by_reference\n");
3061 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
3062 ABI. Only called if TARGET_SSE. */
3064 contains_128bit_aligned_vector_p (tree type)
3066 enum machine_mode mode = TYPE_MODE (type);
3067 if (SSE_REG_MODE_P (mode)
3068 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3070 if (TYPE_ALIGN (type) < 128)
3073 if (AGGREGATE_TYPE_P (type))
3075 /* Walk the aggregates recursively. */
3076 switch (TREE_CODE (type))
3080 case QUAL_UNION_TYPE:
3084 if (TYPE_BINFO (type))
3086 tree binfo, base_binfo;
3089 for (binfo = TYPE_BINFO (type), i = 0;
3090 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
3091 if (contains_128bit_aligned_vector_p
3092 (BINFO_TYPE (base_binfo)))
3095 /* And now merge the fields of structure. */
3096 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3098 if (TREE_CODE (field) == FIELD_DECL
3099 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3106 /* Just for use if some languages passes arrays by value. */
3107 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3117 /* Gives the alignment boundary, in bits, of an argument with the
3118 specified mode and type. */
3121 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3125 align = TYPE_ALIGN (type);
3127 align = GET_MODE_ALIGNMENT (mode);
3128 if (align < PARM_BOUNDARY)
3129 align = PARM_BOUNDARY;
3132 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
3133 make an exception for SSE modes since these require 128bit
3136 The handling here differs from field_alignment. ICC aligns MMX
3137 arguments to 4 byte boundaries, while structure fields are aligned
3138 to 8 byte boundaries. */
3140 align = PARM_BOUNDARY;
3143 if (!SSE_REG_MODE_P (mode))
3144 align = PARM_BOUNDARY;
3148 if (!contains_128bit_aligned_vector_p (type))
3149 align = PARM_BOUNDARY;
3157 /* Return true if N is a possible register number of function value. */
3159 ix86_function_value_regno_p (int regno)
3162 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
3163 || (regno == FIRST_SSE_REG && TARGET_SSE))
3167 && (regno == FIRST_MMX_REG && TARGET_MMX))
3173 /* Define how to find the value returned by a function.
3174 VALTYPE is the data type of the value (as a tree).
3175 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3176 otherwise, FUNC is 0. */
3178 ix86_function_value (tree valtype, tree func)
3180 enum machine_mode natmode = type_natural_mode (valtype);
3184 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
3185 1, REGPARM_MAX, SSE_REGPARM_MAX,
3186 x86_64_int_return_registers, 0);
3187 /* For zero sized structures, construct_container return NULL, but we
3188 need to keep rest of compiler happy by returning meaningful value. */
3190 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
3194 return gen_rtx_REG (TYPE_MODE (valtype), ix86_value_regno (natmode, func));
3197 /* Return false iff type is returned in memory. */
3199 ix86_return_in_memory (tree type)
3201 int needed_intregs, needed_sseregs, size;
3202 enum machine_mode mode = type_natural_mode (type);
3205 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
3207 if (mode == BLKmode)
3210 size = int_size_in_bytes (type);
3212 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
3215 if (VECTOR_MODE_P (mode) || mode == TImode)
3217 /* User-created vectors small enough to fit in EAX. */
3221 /* MMX/3dNow values are returned in MM0,
3222 except when it doesn't exits. */
3224 return (TARGET_MMX ? 0 : 1);
3226 /* SSE values are returned in XMM0, except when it doesn't exist. */
3228 return (TARGET_SSE ? 0 : 1);
3239 /* When returning SSE vector types, we have a choice of either
3240 (1) being abi incompatible with a -march switch, or
3241 (2) generating an error.
3242 Given no good solution, I think the safest thing is one warning.
3243 The user won't be able to use -Werror, but....
3245 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3246 called in response to actually generating a caller or callee that
3247 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3248 via aggregate_value_p for general type probing from tree-ssa. */
3251 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3253 static bool warnedsse, warnedmmx;
3257 /* Look at the return type of the function, not the function type. */
3258 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3260 if (!TARGET_SSE && !warnedsse)
3263 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3266 warning (0, "SSE vector return without SSE enabled "
3271 if (!TARGET_MMX && !warnedmmx)
3273 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
3276 warning (0, "MMX vector return without MMX enabled "
3285 /* Define how to find the value returned by a library function
3286 assuming the value has mode MODE. */
3288 ix86_libcall_value (enum machine_mode mode)
3299 return gen_rtx_REG (mode, FIRST_SSE_REG);
3302 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3306 return gen_rtx_REG (mode, 0);
3310 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL));
3313 /* Given a mode, return the register to use for a return value. */
3316 ix86_value_regno (enum machine_mode mode, tree func)
3318 gcc_assert (!TARGET_64BIT);
3320 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
3321 we prevent this case when mmx is not available. */
3322 if ((VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8))
3323 return FIRST_MMX_REG;
3325 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3326 we prevent this case when sse is not available. */
3327 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3328 return FIRST_SSE_REG;
3330 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
3331 if (GET_MODE_CLASS (mode) != MODE_FLOAT || !TARGET_FLOAT_RETURNS_IN_80387)
3334 /* Floating point return values in %st(0), except for local functions when
3335 SSE math is enabled or for functions with sseregparm attribute. */
3336 if (func && (mode == SFmode || mode == DFmode))
3338 int sse_level = ix86_function_sseregparm (TREE_TYPE (func), func);
3339 if ((sse_level >= 1 && mode == SFmode)
3340 || (sse_level == 2 && mode == DFmode))
3341 return FIRST_SSE_REG;
3344 return FIRST_FLOAT_REG;
3347 /* Create the va_list data type. */
3350 ix86_build_builtin_va_list (void)
3352 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3354 /* For i386 we use plain pointer to argument area. */
3356 return build_pointer_type (char_type_node);
3358 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3359 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3361 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3362 unsigned_type_node);
3363 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3364 unsigned_type_node);
3365 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3367 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3370 va_list_gpr_counter_field = f_gpr;
3371 va_list_fpr_counter_field = f_fpr;
3373 DECL_FIELD_CONTEXT (f_gpr) = record;
3374 DECL_FIELD_CONTEXT (f_fpr) = record;
3375 DECL_FIELD_CONTEXT (f_ovf) = record;
3376 DECL_FIELD_CONTEXT (f_sav) = record;
3378 TREE_CHAIN (record) = type_decl;
3379 TYPE_NAME (record) = type_decl;
3380 TYPE_FIELDS (record) = f_gpr;
3381 TREE_CHAIN (f_gpr) = f_fpr;
3382 TREE_CHAIN (f_fpr) = f_ovf;
3383 TREE_CHAIN (f_ovf) = f_sav;
3385 layout_type (record);
3387 /* The correct type is an array type of one element. */
3388 return build_array_type (record, build_index_type (size_zero_node));
3391 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3394 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3395 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3398 CUMULATIVE_ARGS next_cum;
3399 rtx save_area = NULL_RTX, mem;
3412 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
3415 /* Indicate to allocate space on the stack for varargs save area. */
3416 ix86_save_varrargs_registers = 1;
3418 cfun->stack_alignment_needed = 128;
3420 fntype = TREE_TYPE (current_function_decl);
3421 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3422 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3423 != void_type_node));
3425 /* For varargs, we do not want to skip the dummy va_dcl argument.
3426 For stdargs, we do want to skip the last named argument. */
3429 function_arg_advance (&next_cum, mode, type, 1);
3432 save_area = frame_pointer_rtx;
3434 set = get_varargs_alias_set ();
3436 for (i = next_cum.regno;
3438 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
3441 mem = gen_rtx_MEM (Pmode,
3442 plus_constant (save_area, i * UNITS_PER_WORD));
3443 set_mem_alias_set (mem, set);
3444 emit_move_insn (mem, gen_rtx_REG (Pmode,
3445 x86_64_int_parameter_registers[i]));
3448 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
3450 /* Now emit code to save SSE registers. The AX parameter contains number
3451 of SSE parameter registers used to call this function. We use
3452 sse_prologue_save insn template that produces computed jump across
3453 SSE saves. We need some preparation work to get this working. */
3455 label = gen_label_rtx ();
3456 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3458 /* Compute address to jump to :
3459 label - 5*eax + nnamed_sse_arguments*5 */
3460 tmp_reg = gen_reg_rtx (Pmode);
3461 nsse_reg = gen_reg_rtx (Pmode);
3462 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3463 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3464 gen_rtx_MULT (Pmode, nsse_reg,
3466 if (next_cum.sse_regno)
3469 gen_rtx_CONST (DImode,
3470 gen_rtx_PLUS (DImode,
3472 GEN_INT (next_cum.sse_regno * 4))));
3474 emit_move_insn (nsse_reg, label_ref);
3475 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3477 /* Compute address of memory block we save into. We always use pointer
3478 pointing 127 bytes after first byte to store - this is needed to keep
3479 instruction size limited by 4 bytes. */
3480 tmp_reg = gen_reg_rtx (Pmode);
3481 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3482 plus_constant (save_area,
3483 8 * REGPARM_MAX + 127)));
3484 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3485 set_mem_alias_set (mem, set);
3486 set_mem_align (mem, BITS_PER_WORD);
3488 /* And finally do the dirty job! */
3489 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3490 GEN_INT (next_cum.sse_regno), label));
3495 /* Implement va_start. */
3498 ix86_va_start (tree valist, rtx nextarg)
3500 HOST_WIDE_INT words, n_gpr, n_fpr;
3501 tree f_gpr, f_fpr, f_ovf, f_sav;
3502 tree gpr, fpr, ovf, sav, t;
3504 /* Only 64bit target needs something special. */
3507 std_expand_builtin_va_start (valist, nextarg);
3511 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3512 f_fpr = TREE_CHAIN (f_gpr);
3513 f_ovf = TREE_CHAIN (f_fpr);
3514 f_sav = TREE_CHAIN (f_ovf);
3516 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3517 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3518 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3519 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3520 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3522 /* Count number of gp and fp argument registers used. */
3523 words = current_function_args_info.words;
3524 n_gpr = current_function_args_info.regno;
3525 n_fpr = current_function_args_info.sse_regno;
3527 if (TARGET_DEBUG_ARG)
3528 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3529 (int) words, (int) n_gpr, (int) n_fpr);
3531 if (cfun->va_list_gpr_size)
3533 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3534 build_int_cst (NULL_TREE, n_gpr * 8));
3535 TREE_SIDE_EFFECTS (t) = 1;
3536 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3539 if (cfun->va_list_fpr_size)
3541 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3542 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3543 TREE_SIDE_EFFECTS (t) = 1;
3544 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3547 /* Find the overflow area. */
3548 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3550 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3551 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3552 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3553 TREE_SIDE_EFFECTS (t) = 1;
3554 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3556 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
3558 /* Find the register save area.
3559 Prologue of the function save it right above stack frame. */
3560 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3561 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3562 TREE_SIDE_EFFECTS (t) = 1;
3563 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3567 /* Implement va_arg. */
3570 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3572 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3573 tree f_gpr, f_fpr, f_ovf, f_sav;
3574 tree gpr, fpr, ovf, sav, t;
3576 tree lab_false, lab_over = NULL_TREE;
3581 enum machine_mode nat_mode;
3583 /* Only 64bit target needs something special. */
3585 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3587 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3588 f_fpr = TREE_CHAIN (f_gpr);
3589 f_ovf = TREE_CHAIN (f_fpr);
3590 f_sav = TREE_CHAIN (f_ovf);
3592 valist = build_va_arg_indirect_ref (valist);
3593 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3594 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3595 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3596 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3598 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3600 type = build_pointer_type (type);
3601 size = int_size_in_bytes (type);
3602 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3604 nat_mode = type_natural_mode (type);
3605 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
3606 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3608 /* Pull the value out of the saved registers. */
3610 addr = create_tmp_var (ptr_type_node, "addr");
3611 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3615 int needed_intregs, needed_sseregs;
3617 tree int_addr, sse_addr;
3619 lab_false = create_artificial_label ();
3620 lab_over = create_artificial_label ();
3622 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
3624 need_temp = (!REG_P (container)
3625 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3626 || TYPE_ALIGN (type) > 128));
3628 /* In case we are passing structure, verify that it is consecutive block
3629 on the register save area. If not we need to do moves. */
3630 if (!need_temp && !REG_P (container))
3632 /* Verify that all registers are strictly consecutive */
3633 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3637 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3639 rtx slot = XVECEXP (container, 0, i);
3640 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3641 || INTVAL (XEXP (slot, 1)) != i * 16)
3649 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3651 rtx slot = XVECEXP (container, 0, i);
3652 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3653 || INTVAL (XEXP (slot, 1)) != i * 8)
3665 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3666 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3667 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3668 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3671 /* First ensure that we fit completely in registers. */
3674 t = build_int_cst (TREE_TYPE (gpr),
3675 (REGPARM_MAX - needed_intregs + 1) * 8);
3676 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3677 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3678 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3679 gimplify_and_add (t, pre_p);
3683 t = build_int_cst (TREE_TYPE (fpr),
3684 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3686 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3687 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3688 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3689 gimplify_and_add (t, pre_p);
3692 /* Compute index to start of area used for integer regs. */
3695 /* int_addr = gpr + sav; */
3696 t = fold_convert (ptr_type_node, gpr);
3697 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3698 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3699 gimplify_and_add (t, pre_p);
3703 /* sse_addr = fpr + sav; */
3704 t = fold_convert (ptr_type_node, fpr);
3705 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3706 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3707 gimplify_and_add (t, pre_p);
3712 tree temp = create_tmp_var (type, "va_arg_tmp");
3715 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
3716 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
3717 gimplify_and_add (t, pre_p);
3719 for (i = 0; i < XVECLEN (container, 0); i++)
3721 rtx slot = XVECEXP (container, 0, i);
3722 rtx reg = XEXP (slot, 0);
3723 enum machine_mode mode = GET_MODE (reg);
3724 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
3725 tree addr_type = build_pointer_type (piece_type);
3728 tree dest_addr, dest;
3730 if (SSE_REGNO_P (REGNO (reg)))
3732 src_addr = sse_addr;
3733 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
3737 src_addr = int_addr;
3738 src_offset = REGNO (reg) * 8;
3740 src_addr = fold_convert (addr_type, src_addr);
3741 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
3742 size_int (src_offset)));
3743 src = build_va_arg_indirect_ref (src_addr);
3745 dest_addr = fold_convert (addr_type, addr);
3746 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
3747 size_int (INTVAL (XEXP (slot, 1)))));
3748 dest = build_va_arg_indirect_ref (dest_addr);
3750 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
3751 gimplify_and_add (t, pre_p);
3757 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
3758 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
3759 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
3760 gimplify_and_add (t, pre_p);
3764 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
3765 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
3766 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
3767 gimplify_and_add (t, pre_p);
3770 t = build1 (GOTO_EXPR, void_type_node, lab_over);
3771 gimplify_and_add (t, pre_p);
3773 t = build1 (LABEL_EXPR, void_type_node, lab_false);
3774 append_to_statement_list (t, pre_p);
3777 /* ... otherwise out of the overflow area. */
3779 /* Care for on-stack alignment if needed. */
3780 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
3784 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
3785 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
3786 build_int_cst (TREE_TYPE (ovf), align - 1));
3787 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3788 build_int_cst (TREE_TYPE (t), -align));
3790 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
3792 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
3793 gimplify_and_add (t2, pre_p);
3795 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
3796 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
3797 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3798 gimplify_and_add (t, pre_p);
3802 t = build1 (LABEL_EXPR, void_type_node, lab_over);
3803 append_to_statement_list (t, pre_p);
3806 ptrtype = build_pointer_type (type);
3807 addr = fold_convert (ptrtype, addr);
3810 addr = build_va_arg_indirect_ref (addr);
3811 return build_va_arg_indirect_ref (addr);
3814 /* Return nonzero if OPNUM's MEM should be matched
3815 in movabs* patterns. */
3818 ix86_check_movabs (rtx insn, int opnum)
3822 set = PATTERN (insn);
3823 if (GET_CODE (set) == PARALLEL)
3824 set = XVECEXP (set, 0, 0);
3825 gcc_assert (GET_CODE (set) == SET);
3826 mem = XEXP (set, opnum);
3827 while (GET_CODE (mem) == SUBREG)
3828 mem = SUBREG_REG (mem);
3829 gcc_assert (GET_CODE (mem) == MEM);
3830 return (volatile_ok || !MEM_VOLATILE_P (mem));
3833 /* Initialize the table of extra 80387 mathematical constants. */
3836 init_ext_80387_constants (void)
3838 static const char * cst[5] =
3840 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
3841 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
3842 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
3843 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
3844 "3.1415926535897932385128089594061862044", /* 4: fldpi */
3848 for (i = 0; i < 5; i++)
3850 real_from_string (&ext_80387_constants_table[i], cst[i]);
3851 /* Ensure each constant is rounded to XFmode precision. */
3852 real_convert (&ext_80387_constants_table[i],
3853 XFmode, &ext_80387_constants_table[i]);
3856 ext_80387_constants_init = 1;
3859 /* Return true if the constant is something that can be loaded with
3860 a special instruction. */
3863 standard_80387_constant_p (rtx x)
3865 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
3868 if (x == CONST0_RTX (GET_MODE (x)))
3870 if (x == CONST1_RTX (GET_MODE (x)))
3873 /* For XFmode constants, try to find a special 80387 instruction when
3874 optimizing for size or on those CPUs that benefit from them. */
3875 if (GET_MODE (x) == XFmode
3876 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
3881 if (! ext_80387_constants_init)
3882 init_ext_80387_constants ();
3884 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3885 for (i = 0; i < 5; i++)
3886 if (real_identical (&r, &ext_80387_constants_table[i]))
3893 /* Return the opcode of the special instruction to be used to load
3897 standard_80387_constant_opcode (rtx x)
3899 switch (standard_80387_constant_p (x))
3920 /* Return the CONST_DOUBLE representing the 80387 constant that is
3921 loaded by the specified special instruction. The argument IDX
3922 matches the return value from standard_80387_constant_p. */
3925 standard_80387_constant_rtx (int idx)
3929 if (! ext_80387_constants_init)
3930 init_ext_80387_constants ();
3946 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
/* Return 1 if X is an FP constant we can load into an SSE register
   without using memory.  */
3953 standard_sse_constant_p (rtx x)
3955 if (x == const0_rtx)
3957 return (x == CONST0_RTX (GET_MODE (x)));
3960 /* Returns 1 if OP contains a symbol reference */
3963 symbolic_reference_mentioned_p (rtx op)
3968 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3971 fmt = GET_RTX_FORMAT (GET_CODE (op));
3972 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3978 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3979 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3983 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3990 /* Return 1 if it is appropriate to emit `ret' instructions in the
3991 body of a function. Do this only if the epilogue is simple, needing a
3992 couple of insns. Prior to reloading, we can't tell how many registers
3993 must be saved, so return 0 then. Return 0 if there is no frame
3994 marker to de-allocate. */
3997 ix86_can_use_return_insn_p (void)
3999 struct ix86_frame frame;
4001 if (! reload_completed || frame_pointer_needed)
/* Don't allow more than 32k bytes of arguments to be popped; that is
   all we can handle with one instruction here.  */
4006 if (current_function_pops_args
4007 && current_function_args_size >= 32768)
4010 ix86_compute_frame_layout (&frame);
4011 return frame.to_allocate == 0 && frame.nregs == 0;
4014 /* Value should be nonzero if functions must have frame pointers.
4015 Zero means the frame pointer need not be set up (and parms may
4016 be accessed via the stack pointer) in functions that seem suitable. */
4019 ix86_frame_pointer_required (void)
4021 /* If we accessed previous frames, then the generated code expects
4022 to be able to access the saved ebp value in our frame. */
4023 if (cfun->machine->accesses_prev_frame)
/* Several x86 OSes need a frame pointer for other reasons,
   usually pertaining to setjmp.  */
4028 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4031 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4032 the frame pointer by default. Turn it back on now if we've not
4033 got a leaf function. */
4034 if (TARGET_OMIT_LEAF_FRAME_POINTER
4035 && (!current_function_is_leaf))
4038 if (current_function_profile)
4044 /* Record that the current function accesses previous call frames. */
4047 ix86_setup_frame_addresses (void)
4049 cfun->machine->accesses_prev_frame = 1;
4052 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
4053 # define USE_HIDDEN_LINKONCE 1
4055 # define USE_HIDDEN_LINKONCE 0
4058 static int pic_labels_used;
4060 /* Fills in the label name that should be used for a pc thunk for
4061 the given register. */
4064 get_pc_thunk_name (char name[32], unsigned int regno)
4066 if (USE_HIDDEN_LINKONCE)
4067 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
4069 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
4073 /* This function generates code for -fpic that loads %ebx with
4074 the return address of the caller and then returns. */
4077 ix86_file_end (void)
4082 for (regno = 0; regno < 8; ++regno)
4086 if (! ((pic_labels_used >> regno) & 1))
4089 get_pc_thunk_name (name, regno);
4091 if (USE_HIDDEN_LINKONCE)
4095 decl = build_decl (FUNCTION_DECL, get_identifier (name),
4097 TREE_PUBLIC (decl) = 1;
4098 TREE_STATIC (decl) = 1;
4099 DECL_ONE_ONLY (decl) = 1;
4101 (*targetm.asm_out.unique_section) (decl, 0);
4102 named_section (decl, NULL, 0);
4104 (*targetm.asm_out.globalize_label) (asm_out_file, name);
4105 fputs ("\t.hidden\t", asm_out_file);
4106 assemble_name (asm_out_file, name);
4107 fputc ('\n', asm_out_file);
4108 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
4113 ASM_OUTPUT_LABEL (asm_out_file, name);
4116 xops[0] = gen_rtx_REG (SImode, regno);
4117 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
4118 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
4119 output_asm_insn ("ret", xops);
4122 if (NEED_INDICATE_EXEC_STACK)
4123 file_end_indicate_exec_stack ();
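/* For reference, a sketch of the pc thunk emitted above for %ebx when
   USE_HIDDEN_LINKONCE is set (the exact section directives depend on
   the target headers):

	.globl	__i686.get_pc_thunk.bx
	.hidden	__i686.get_pc_thunk.bx
   __i686.get_pc_thunk.bx:
	movl	(%esp), %ebx
	ret
*/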
4126 /* Emit code for the SET_GOT patterns. */
4129 output_set_got (rtx dest)
4134 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
4136 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
4138 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
4141 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
4143 output_asm_insn ("call\t%a2", xops);
4146 /* Output the "canonical" label name ("Lxx$pb") here too. This
4147 is what will be referred to by the Mach-O PIC subsystem. */
4148 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
4150 (*targetm.asm_out.internal_label) (asm_out_file, "L",
4151 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
4154 output_asm_insn ("pop{l}\t%0", xops);
4159 get_pc_thunk_name (name, REGNO (dest));
4160 pic_labels_used |= 1 << REGNO (dest);
4162 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4163 xops[2] = gen_rtx_MEM (QImode, xops[2]);
4164 output_asm_insn ("call\t%X2", xops);
4167 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
4168 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
4169 else if (!TARGET_MACHO)
4170 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %a1+(.-%a2)}", xops);
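/* An illustrative sketch of the two sequences emitted above for
   dest == %ebx.  Without deep branch prediction:

	call	.L1
   .L1:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L1], %ebx

   With deep branch prediction, calling the pc thunk keeps the CPU's
   return-address predictor balanced:

	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
*/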
/* Generate a "push" pattern for input ARG.  */
4180 return gen_rtx_SET (VOIDmode,
4182 gen_rtx_PRE_DEC (Pmode,
4183 stack_pointer_rtx)),
4187 /* Return >= 0 if there is an unused call-clobbered register available
4188 for the entire function. */
4191 ix86_select_alt_pic_regnum (void)
4193 if (current_function_is_leaf && !current_function_profile)
4196 for (i = 2; i >= 0; --i)
4197 if (!regs_ever_live[i])
4201 return INVALID_REGNUM;
4204 /* Return 1 if we need to save REGNO. */
4206 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4208 if (pic_offset_table_rtx
4209 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4210 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4211 || current_function_profile
4212 || current_function_calls_eh_return
4213 || current_function_uses_const_pool))
4215 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4220 if (current_function_calls_eh_return && maybe_eh_return)
4225 unsigned test = EH_RETURN_DATA_REGNO (i);
4226 if (test == INVALID_REGNUM)
4233 return (regs_ever_live[regno]
4234 && !call_used_regs[regno]
4235 && !fixed_regs[regno]
4236 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4239 /* Return number of registers to be saved on the stack. */
4242 ix86_nsaved_regs (void)
4247 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4248 if (ix86_save_reg (regno, true))
4253 /* Return the offset between two registers, one to be eliminated, and the other
4254 its replacement, at the start of a routine. */
4257 ix86_initial_elimination_offset (int from, int to)
4259 struct ix86_frame frame;
4260 ix86_compute_frame_layout (&frame);
4262 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4263 return frame.hard_frame_pointer_offset;
4264 else if (from == FRAME_POINTER_REGNUM
4265 && to == HARD_FRAME_POINTER_REGNUM)
4266 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4269 gcc_assert (to == STACK_POINTER_REGNUM);
4271 if (from == ARG_POINTER_REGNUM)
4272 return frame.stack_pointer_offset;
4274 gcc_assert (from == FRAME_POINTER_REGNUM);
4275 return frame.stack_pointer_offset - frame.frame_pointer_offset;
/* Fill the structure ix86_frame describing the frame of the
   currently compiled function.  */
4282 ix86_compute_frame_layout (struct ix86_frame *frame)
4284 HOST_WIDE_INT total_size;
4285 unsigned int stack_alignment_needed;
4286 HOST_WIDE_INT offset;
4287 unsigned int preferred_alignment;
4288 HOST_WIDE_INT size = get_frame_size ();
4290 frame->nregs = ix86_nsaved_regs ();
4293 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4294 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
/* During reload iteration the number of registers saved can change.
   Recompute the value as needed.  Do not recompute when the number
   of registers did not change, as reload calls this function
   multiple times and does not expect the decision to change within
   a single iteration.  */
4301 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4303 int count = frame->nregs;
4305 cfun->machine->use_fast_prologue_epilogue_nregs = count;
/* The fast prologue uses moves instead of pushes to save registers.
   This is significantly longer, but also executes faster, as modern
   hardware can execute the moves in parallel but cannot do so for
   push/pop.

   Be careful about choosing which prologue to emit: when a function
   takes many instructions to execute, we may as well use the slow
   version, and likewise when the function is known to be outside a
   hot spot (this is known with feedback only).  Weight the size of
   the function by the number of registers to save, as it is cheap to
   use one or two push instructions but very slow to use many of
   them.  */
4317 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4318 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4319 || (flag_branch_probabilities
4320 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4321 cfun->machine->use_fast_prologue_epilogue = false;
4323 cfun->machine->use_fast_prologue_epilogue
4324 = !expensive_function_p (count);
4326 if (TARGET_PROLOGUE_USING_MOVE
4327 && cfun->machine->use_fast_prologue_epilogue)
4328 frame->save_regs_using_mov = true;
4330 frame->save_regs_using_mov = false;
4333 /* Skip return address and saved base pointer. */
4334 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4336 frame->hard_frame_pointer_offset = offset;
/* Do some sanity checking of stack_alignment_needed and
   preferred_alignment, since the i386 port is the only one using
   these features, and they may break easily.  */
4342 gcc_assert (!size || stack_alignment_needed);
4343 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
4344 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4345 gcc_assert (stack_alignment_needed
4346 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4348 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4349 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4351 /* Register save area */
4352 offset += frame->nregs * UNITS_PER_WORD;
4355 if (ix86_save_varrargs_registers)
4357 offset += X86_64_VARARGS_SIZE;
4358 frame->va_arg_size = X86_64_VARARGS_SIZE;
4361 frame->va_arg_size = 0;
4363 /* Align start of frame for local function. */
4364 frame->padding1 = ((offset + stack_alignment_needed - 1)
4365 & -stack_alignment_needed) - offset;
4367 offset += frame->padding1;
4369 /* Frame pointer points here. */
4370 frame->frame_pointer_offset = offset;
/* Add the outgoing arguments area.  It can be skipped if we
   eliminated all the function calls as dead code.  Skipping is,
   however, impossible when the function calls alloca, as the alloca
   expander assumes that the last current_function_outgoing_args_size
   bytes of the stack frame are unused.  */
4379 if (ACCUMULATE_OUTGOING_ARGS
4380 && (!current_function_is_leaf || current_function_calls_alloca))
4382 offset += current_function_outgoing_args_size;
4383 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4386 frame->outgoing_arguments_size = 0;
/* Align the stack boundary.  Only needed if we're calling another
   function or using alloca.  */
4390 if (!current_function_is_leaf || current_function_calls_alloca)
4391 frame->padding2 = ((offset + preferred_alignment - 1)
4392 & -preferred_alignment) - offset;
4394 frame->padding2 = 0;
4396 offset += frame->padding2;
/* We've reached the end of the stack frame.  */
4399 frame->stack_pointer_offset = offset;
/* Size the prologue needs to allocate.  */
4402 frame->to_allocate =
4403 (size + frame->padding1 + frame->padding2
4404 + frame->outgoing_arguments_size + frame->va_arg_size);
4406 if ((!frame->to_allocate && frame->nregs <= 1)
4407 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4408 frame->save_regs_using_mov = false;
4410 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4411 && current_function_is_leaf)
4413 frame->red_zone_size = frame->to_allocate;
4414 if (frame->save_regs_using_mov)
4415 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4416 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4417 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4420 frame->red_zone_size = 0;
4421 frame->to_allocate -= frame->red_zone_size;
4422 frame->stack_pointer_offset -= frame->red_zone_size;
4424 fprintf (stderr, "nregs: %i\n", frame->nregs);
4425 fprintf (stderr, "size: %i\n", size);
4426 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4427 fprintf (stderr, "padding1: %i\n", frame->padding1);
4428 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4429 fprintf (stderr, "padding2: %i\n", frame->padding2);
4430 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4431 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4432 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4433 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4434 frame->hard_frame_pointer_offset);
4435 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
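/* A rough picture of the layout computed above, from higher to lower
   addresses (a sketch inferred from the code; any of the areas may be
   empty for a given function):

	return address
	saved frame pointer	<- hard_frame_pointer_offset
	saved registers
	va_arg save area
	padding1
	local variables		<- frame_pointer_offset
	outgoing arguments
	padding2		<- stack_pointer_offset
*/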
4439 /* Emit code to save registers in the prologue. */
4442 ix86_emit_save_regs (void)
4447 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4448 if (ix86_save_reg (regno, true))
4450 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4451 RTX_FRAME_RELATED_P (insn) = 1;
/* Emit code to save registers using MOV insns.  The first register
   is stored at POINTER + OFFSET.  */
4458 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4463 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4464 if (ix86_save_reg (regno, true))
4466 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4468 gen_rtx_REG (Pmode, regno));
4469 RTX_FRAME_RELATED_P (insn) = 1;
4470 offset += UNITS_PER_WORD;
/* Expand a prologue or epilogue stack adjustment.
   The pattern exists to put a dependency on all ebp-based memory
   accesses.  STYLE should be negative if instructions should be
   marked as frame related, zero if the %r11 register is live and
   cannot be freely used, and positive otherwise.  */
4481 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4486 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4487 else if (x86_64_immediate_operand (offset, DImode))
4488 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4492 /* r11 is used by indirect sibcall return as well, set before the
4493 epilogue and used after the epilogue. ATM indirect sibcall
4494 shouldn't be used together with huge frame sizes in one
4495 function because of the frame_size check in sibcall.c. */
4497 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4498 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4500 RTX_FRAME_RELATED_P (insn) = 1;
4501 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4505 RTX_FRAME_RELATED_P (insn) = 1;
4508 /* Expand the prologue into a bunch of separate insns. */
4511 ix86_expand_prologue (void)
4515 struct ix86_frame frame;
4516 HOST_WIDE_INT allocate;
4518 ix86_compute_frame_layout (&frame);
4520 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4521 slower on all targets. Also sdb doesn't like it. */
4523 if (frame_pointer_needed)
4525 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4526 RTX_FRAME_RELATED_P (insn) = 1;
4528 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4529 RTX_FRAME_RELATED_P (insn) = 1;
4532 allocate = frame.to_allocate;
4534 if (!frame.save_regs_using_mov)
4535 ix86_emit_save_regs ();
4537 allocate += frame.nregs * UNITS_PER_WORD;
/* When using the red zone we may start saving registers before
   allocating the stack frame, saving one cycle of the prologue.  */
4541 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4542 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4543 : stack_pointer_rtx,
4544 -frame.nregs * UNITS_PER_WORD);
4548 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4549 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4550 GEN_INT (-allocate), -1);
4553 /* Only valid for Win32. */
4554 rtx eax = gen_rtx_REG (SImode, 0);
4555 bool eax_live = ix86_eax_live_at_start_p ();
4558 gcc_assert (!TARGET_64BIT);
4562 emit_insn (gen_push (eax));
4566 emit_move_insn (eax, GEN_INT (allocate));
4568 insn = emit_insn (gen_allocate_stack_worker (eax));
4569 RTX_FRAME_RELATED_P (insn) = 1;
4570 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
4571 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
4572 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4573 t, REG_NOTES (insn));
4577 if (frame_pointer_needed)
4578 t = plus_constant (hard_frame_pointer_rtx,
4581 - frame.nregs * UNITS_PER_WORD);
4583 t = plus_constant (stack_pointer_rtx, allocate);
4584 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4588 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4590 if (!frame_pointer_needed || !frame.to_allocate)
4591 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4593 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4594 -frame.nregs * UNITS_PER_WORD);
4597 pic_reg_used = false;
4598 if (pic_offset_table_rtx
4599 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4600 || current_function_profile))
4602 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4604 if (alt_pic_reg_used != INVALID_REGNUM)
4605 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4607 pic_reg_used = true;
4612 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
/* Even with accurate pre-reload life analysis, we can wind up
   deleting all references to the pic register after reload.
   Consider the case where cross-jumping unifies two sides of a
   branch controlled by a comparison vs the only read from a global.
   In that case, allow the set_got to be deleted, though we're too
   late to do anything about the ebx save in the prologue.  */
4620 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
/* Prevent function calls from being scheduled before the call to
   mcount.  In the pic_reg_used case, make sure that the got load
   isn't deleted.  */
4625 if (current_function_profile)
4626 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
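/* A typical instance of the prologue expanded above (sketch only; the
   fast path saves registers with moves instead, and the stack-probe
   path allocates through %eax):

	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx		# one push per register to be saved
	subl	$N, %esp	# N == frame.to_allocate
*/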
4629 /* Emit code to restore saved registers using MOV insns. First register
4630 is restored from POINTER + OFFSET. */
4632 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
4633 int maybe_eh_return)
4636 rtx base_address = gen_rtx_MEM (Pmode, pointer);
4638 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4639 if (ix86_save_reg (regno, maybe_eh_return))
/* Ensure that adjust_address won't be forced to produce a pointer
   out of the range allowed by the x86-64 instruction set.  */
4643 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
4647 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4648 emit_move_insn (r11, GEN_INT (offset));
4649 emit_insn (gen_adddi3 (r11, r11, pointer));
4650 base_address = gen_rtx_MEM (Pmode, r11);
4653 emit_move_insn (gen_rtx_REG (Pmode, regno),
4654 adjust_address (base_address, Pmode, offset));
4655 offset += UNITS_PER_WORD;
4659 /* Restore function stack, frame, and registers. */
4662 ix86_expand_epilogue (int style)
4665 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
4666 struct ix86_frame frame;
4667 HOST_WIDE_INT offset;
4669 ix86_compute_frame_layout (&frame);
4671 /* Calculate start of saved registers relative to ebp. Special care
4672 must be taken for the normal return case of a function using
4673 eh_return: the eax and edx registers are marked as saved, but not
4674 restored along this path. */
4675 offset = frame.nregs;
4676 if (current_function_calls_eh_return && style != 2)
4678 offset *= -UNITS_PER_WORD;
/* If we're only restoring one register and sp is not valid, then
   use a move instruction to restore the register, since it's less
   work than reloading sp and popping the register.

   The default code results in a stack adjustment using an add/lea
   instruction, while this code results in a LEAVE instruction (or
   discrete equivalent), so it is profitable in some other cases as
   well, especially when there are no registers to restore.  We also
   use this code when TARGET_USE_LEAVE is set and there is exactly
   one register to pop.  This heuristic may need some tuning in the
   future.  */
4690 if ((!sp_valid && frame.nregs <= 1)
4691 || (TARGET_EPILOGUE_USING_MOVE
4692 && cfun->machine->use_fast_prologue_epilogue
4693 && (frame.nregs > 1 || frame.to_allocate))
4694 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
4695 || (frame_pointer_needed && TARGET_USE_LEAVE
4696 && cfun->machine->use_fast_prologue_epilogue
4697 && frame.nregs == 1)
4698 || current_function_calls_eh_return)
/* Restore registers.  We can use ebp or esp to address the memory
   locations.  If both are available, default to ebp, since offsets
   are known to be small.  The only exception is esp pointing
   directly to the end of the block of saved registers, where we may
   simplify the addressing mode.  */
4706 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
4707 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
4708 frame.to_allocate, style == 2);
4710 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
4711 offset, style == 2);
4713 /* eh_return epilogues need %ecx added to the stack pointer. */
4716 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
4718 if (frame_pointer_needed)
4720 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
4721 tmp = plus_constant (tmp, UNITS_PER_WORD);
4722 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
4724 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
4725 emit_move_insn (hard_frame_pointer_rtx, tmp);
4727 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
4732 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
4733 tmp = plus_constant (tmp, (frame.to_allocate
4734 + frame.nregs * UNITS_PER_WORD));
4735 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
4738 else if (!frame_pointer_needed)
4739 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4740 GEN_INT (frame.to_allocate
4741 + frame.nregs * UNITS_PER_WORD),
4743 /* If not an i386, mov & pop is faster than "leave". */
4744 else if (TARGET_USE_LEAVE || optimize_size
4745 || !cfun->machine->use_fast_prologue_epilogue)
4746 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4749 pro_epilogue_adjust_stack (stack_pointer_rtx,
4750 hard_frame_pointer_rtx,
4753 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4755 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
/* The first step is to deallocate the stack frame so that we can
   pop the registers.  */
4764 gcc_assert (frame_pointer_needed);
4765 pro_epilogue_adjust_stack (stack_pointer_rtx,
4766 hard_frame_pointer_rtx,
4767 GEN_INT (offset), style);
4769 else if (frame.to_allocate)
4770 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4771 GEN_INT (frame.to_allocate), style);
4773 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4774 if (ix86_save_reg (regno, false))
4777 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
4779 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
4781 if (frame_pointer_needed)
4783 /* Leave results in shorter dependency chains on CPUs that are
4784 able to grok it fast. */
4785 if (TARGET_USE_LEAVE)
4786 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4787 else if (TARGET_64BIT)
4788 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4790 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4794 /* Sibcall epilogues don't want a return instruction. */
4798 if (current_function_pops_args && current_function_args_size)
4800 rtx popc = GEN_INT (current_function_pops_args);
/* i386 can only pop 64K bytes.  If asked to pop more, pop the
   return address, do an explicit add, and jump indirectly to the
   caller.  */
4806 if (current_function_pops_args >= 65536)
4808 rtx ecx = gen_rtx_REG (SImode, 2);
/* There is no "pascal" calling convention in the 64-bit ABI.  */
4811 gcc_assert (!TARGET_64BIT);
4813 emit_insn (gen_popsi1 (ecx));
4814 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
4815 emit_jump_insn (gen_return_indirect_internal (ecx));
4818 emit_jump_insn (gen_return_pop_internal (popc));
4821 emit_jump_insn (gen_return_internal ());
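/* Sketch of a callee-pops epilogue produced above when a frame
   pointer is in use, TARGET_USE_LEAVE is set and fewer than 64K
   bytes of arguments are popped:

	leave
	ret	$N		# N == current_function_pops_args
*/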
4824 /* Reset from the function's potential modifications. */
4827 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4828 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4830 if (pic_offset_table_rtx)
4831 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
/* Extract the parts of an RTL expression that is a valid memory
   address for an instruction.  Return 0 if the structure of the
   address is grossly off.  Return -1 if the address contains ASHIFT,
   so it is not strictly valid, but is still used for computing the
   length of the lea instruction.  */
4840 ix86_decompose_address (rtx addr, struct ix86_address *out)
4842 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
4843 rtx base_reg, index_reg;
4844 HOST_WIDE_INT scale = 1;
4845 rtx scale_rtx = NULL_RTX;
4847 enum ix86_address_seg seg = SEG_DEFAULT;
4849 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
4851 else if (GET_CODE (addr) == PLUS)
4861 addends[n++] = XEXP (op, 1);
4864 while (GET_CODE (op) == PLUS);
4869 for (i = n; i >= 0; --i)
4872 switch (GET_CODE (op))
4877 index = XEXP (op, 0);
4878 scale_rtx = XEXP (op, 1);
4882 if (XINT (op, 1) == UNSPEC_TP
4883 && TARGET_TLS_DIRECT_SEG_REFS
4884 && seg == SEG_DEFAULT)
4885 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
4914 else if (GET_CODE (addr) == MULT)
4916 index = XEXP (addr, 0); /* index*scale */
4917 scale_rtx = XEXP (addr, 1);
4919 else if (GET_CODE (addr) == ASHIFT)
4923 /* We're called for lea too, which implements ashift on occasion. */
4924 index = XEXP (addr, 0);
4925 tmp = XEXP (addr, 1);
4926 if (GET_CODE (tmp) != CONST_INT)
4928 scale = INTVAL (tmp);
4929 if ((unsigned HOST_WIDE_INT) scale > 3)
4935 disp = addr; /* displacement */
4937 /* Extract the integral value of scale. */
4940 if (GET_CODE (scale_rtx) != CONST_INT)
4942 scale = INTVAL (scale_rtx);
4945 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
4946 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
/* Allow the arg pointer and the stack pointer as an index if there is
   no scaling.  */
4949 if (base_reg && index_reg && scale == 1
4950 && (index_reg == arg_pointer_rtx
4951 || index_reg == frame_pointer_rtx
4952 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
4955 tmp = base, base = index, index = tmp;
4956 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
4959 /* Special case: %ebp cannot be encoded as a base without a displacement. */
4960 if ((base_reg == hard_frame_pointer_rtx
4961 || base_reg == frame_pointer_rtx
4962 || base_reg == arg_pointer_rtx) && !disp)
4965 /* Special case: on K6, [%esi] makes the instruction vector decoded.
4966 Avoid this by transforming to [%esi+0]. */
4967 if (ix86_tune == PROCESSOR_K6 && !optimize_size
4968 && base_reg && !index_reg && !disp
4970 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
4973 /* Special case: encode reg+reg instead of reg*2. */
4974 if (!base && index && scale && scale == 2)
4975 base = index, base_reg = index_reg, scale = 1;
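/* Worked example (hypothetical operands, for illustration): the
   address (plus (mult (reg A) (const_int 4)) (plus (reg B)
   (const_int 12))) decomposes into out->base == B, out->index == A,
   out->scale == 4 and out->disp == (const_int 12), i.e. the operand
   12(B,A,4) in AT&T syntax.  */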
4977 /* Special case: scaling cannot be encoded without base or displacement. */
4978 if (!base && !disp && index && scale != 1)
/* Return the cost of the memory address X.
   For i386 it is better to use a complex address than to let gcc
   copy the address into a register and make a new pseudo.  But not
   if the address requires two registers - that would mean more
   pseudos with longer lifetimes.  */
4996 ix86_address_cost (rtx x)
4998 struct ix86_address parts;
5000 int ok = ix86_decompose_address (x, &parts);
5004 if (parts.base && GET_CODE (parts.base) == SUBREG)
5005 parts.base = SUBREG_REG (parts.base);
5006 if (parts.index && GET_CODE (parts.index) == SUBREG)
5007 parts.index = SUBREG_REG (parts.index);
5009 /* More complex memory references are better. */
5010 if (parts.disp && parts.disp != const0_rtx)
5012 if (parts.seg != SEG_DEFAULT)
5015 /* Attempt to minimize number of registers in the address. */
5017 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
5019 && (!REG_P (parts.index)
5020 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
5024 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
5026 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
5027 && parts.base != parts.index)
/* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
   since its predecode logic can't detect the length of such
   instructions and decoding degenerates to vector decoded.  Increase
   the cost of such addresses here.  The penalty is minimally 2
   cycles.  It may be worthwhile to split such addresses or even
   refuse them entirely.

   The following addressing modes are affected:
     [base+scale*index]
     [scale*index+disp]
     [base+index]

   The first and last case may be avoidable by explicitly coding the
   zero into the memory address, but I don't have an AMD-K6 machine
   handy to check this theory.  */
5046 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
5047 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
5048 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
5054 /* If X is a machine specific address (i.e. a symbol or label being
5055 referenced as a displacement from the GOT implemented using an
5056 UNSPEC), then return the base term. Otherwise return X. */
5059 ix86_find_base_term (rtx x)
5065 if (GET_CODE (x) != CONST)
5068 if (GET_CODE (term) == PLUS
5069 && (GET_CODE (XEXP (term, 1)) == CONST_INT
5070 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
5071 term = XEXP (term, 0);
5072 if (GET_CODE (term) != UNSPEC
5073 || XINT (term, 1) != UNSPEC_GOTPCREL)
5076 term = XVECEXP (term, 0, 0);
5078 if (GET_CODE (term) != SYMBOL_REF
5079 && GET_CODE (term) != LABEL_REF)
5085 term = ix86_delegitimize_address (x);
5087 if (GET_CODE (term) != SYMBOL_REF
5088 && GET_CODE (term) != LABEL_REF)
/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
   this is used to form addresses to local data when -fPIC is in
   effect.  */
5099 darwin_local_data_pic (rtx disp)
5101 if (GET_CODE (disp) == MINUS)
5103 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
5104 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
5105 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
5107 const char *sym_name = XSTR (XEXP (disp, 1), 0);
5108 if (! strcmp (sym_name, "<pic base>"))
5116 /* Determine if a given RTX is a valid constant. We already know this
5117 satisfies CONSTANT_P. */
5120 legitimate_constant_p (rtx x)
5122 switch (GET_CODE (x))
5127 if (GET_CODE (x) == PLUS)
5129 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5134 if (TARGET_MACHO && darwin_local_data_pic (x))
5137 /* Only some unspecs are valid as "constants". */
5138 if (GET_CODE (x) == UNSPEC)
5139 switch (XINT (x, 1))
5143 return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
5145 return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
5150 /* We must have drilled down to a symbol. */
5151 if (!symbolic_operand (x, Pmode))
5156 /* TLS symbols are never valid. */
5157 if (tls_symbolic_operand (x, Pmode))
5165 /* Otherwise we handle everything else in the move patterns. */
5169 /* Determine if it's legal to put X into the constant pool. This
5170 is not possible for the address of thread-local symbols, which
5171 is checked above. */
5174 ix86_cannot_force_const_mem (rtx x)
5176 return !legitimate_constant_p (x);
5179 /* Determine if a given RTX is a valid constant address. */
5182 constant_address_p (rtx x)
5184 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
5187 /* Nonzero if the constant value X is a legitimate general operand
5188 when generating PIC code. It is given that flag_pic is on and
5189 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
5192 legitimate_pic_operand_p (rtx x)
5196 switch (GET_CODE (x))
5199 inner = XEXP (x, 0);
5201 /* Only some unspecs are valid as "constants". */
5202 if (GET_CODE (inner) == UNSPEC)
5203 switch (XINT (inner, 1))
5206 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
5214 return legitimate_pic_address_disp_p (x);
/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */
5225 legitimate_pic_address_disp_p (rtx disp)
5229 /* In 64bit mode we can allow direct addresses of symbols and labels
5230 when they are not dynamic symbols. */
5233 /* TLS references should always be enclosed in UNSPEC. */
5234 if (tls_symbolic_operand (disp, GET_MODE (disp)))
5236 if (GET_CODE (disp) == SYMBOL_REF
5237 && ix86_cmodel == CM_SMALL_PIC
5238 && SYMBOL_REF_LOCAL_P (disp))
5240 if (GET_CODE (disp) == LABEL_REF)
5242 if (GET_CODE (disp) == CONST
5243 && GET_CODE (XEXP (disp, 0)) == PLUS)
5245 rtx op0 = XEXP (XEXP (disp, 0), 0);
5246 rtx op1 = XEXP (XEXP (disp, 0), 1);
5248 /* TLS references should always be enclosed in UNSPEC. */
5249 if (tls_symbolic_operand (op0, GET_MODE (op0)))
5251 if (((GET_CODE (op0) == SYMBOL_REF
5252 && ix86_cmodel == CM_SMALL_PIC
5253 && SYMBOL_REF_LOCAL_P (op0))
5254 || GET_CODE (op0) == LABEL_REF)
5255 && GET_CODE (op1) == CONST_INT
5256 && INTVAL (op1) < 16*1024*1024
5257 && INTVAL (op1) >= -16*1024*1024)
5261 if (GET_CODE (disp) != CONST)
5263 disp = XEXP (disp, 0);
/* It is unsafe to allow PLUS expressions; that would limit the
   allowed distance of GOT table references.  We should not need
   these anyway.  */
5269 if (GET_CODE (disp) != UNSPEC
5270 || XINT (disp, 1) != UNSPEC_GOTPCREL)
5273 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5274 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5280 if (GET_CODE (disp) == PLUS)
5282 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5284 disp = XEXP (disp, 0);
5288 if (TARGET_MACHO && darwin_local_data_pic (disp))
5291 if (GET_CODE (disp) != UNSPEC)
5294 switch (XINT (disp, 1))
5299 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5301 if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5302 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5303 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5305 case UNSPEC_GOTTPOFF:
5306 case UNSPEC_GOTNTPOFF:
5307 case UNSPEC_INDNTPOFF:
5310 return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5312 return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5314 return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5320 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5321 memory address for an instruction. The MODE argument is the machine mode
5322 for the MEM expression that wants to use this address.
   It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS
   should convert common non-canonical forms to canonical form so that
   they will be recognized.  */
5329 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5331 struct ix86_address parts;
5332 rtx base, index, disp;
5333 HOST_WIDE_INT scale;
5334 const char *reason = NULL;
5335 rtx reason_rtx = NULL_RTX;
5337 if (TARGET_DEBUG_ADDR)
5340 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5341 GET_MODE_NAME (mode), strict);
5345 if (ix86_decompose_address (addr, &parts) <= 0)
5347 reason = "decomposition failed";
5352 index = parts.index;
5354 scale = parts.scale;
5356 /* Validate base register.
5358 Don't allow SUBREG's that span more than a word here. It can lead to spill
5359 failures when the base is one word out of a two word structure, which is
5360 represented internally as a DImode int. */
5369 else if (GET_CODE (base) == SUBREG
5370 && REG_P (SUBREG_REG (base))
5371 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
5373 reg = SUBREG_REG (base);
5376 reason = "base is not a register";
5380 if (GET_MODE (base) != Pmode)
5382 reason = "base is not in Pmode";
5386 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
5387 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
5389 reason = "base is not valid";
5394 /* Validate index register.
5396 Don't allow SUBREG's that span more than a word here -- same as above. */
5405 else if (GET_CODE (index) == SUBREG
5406 && REG_P (SUBREG_REG (index))
5407 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
5409 reg = SUBREG_REG (index);
5412 reason = "index is not a register";
5416 if (GET_MODE (index) != Pmode)
5418 reason = "index is not in Pmode";
5422 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
5423 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
5425 reason = "index is not valid";
5430 /* Validate scale factor. */
5433 reason_rtx = GEN_INT (scale);
5436 reason = "scale without index";
5440 if (scale != 2 && scale != 4 && scale != 8)
5442 reason = "scale is not a valid multiplier";
5447 /* Validate displacement. */
5452 if (GET_CODE (disp) == CONST
5453 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5454 switch (XINT (XEXP (disp, 0), 1))
5458 case UNSPEC_GOTPCREL:
5459 gcc_assert (flag_pic);
5460 goto is_legitimate_pic;
5462 case UNSPEC_GOTTPOFF:
5463 case UNSPEC_GOTNTPOFF:
5464 case UNSPEC_INDNTPOFF:
5470 reason = "invalid address unspec";
5474 else if (flag_pic && (SYMBOLIC_CONST (disp)
5476 && !machopic_operand_p (disp)
5481 if (TARGET_64BIT && (index || base))
5483 /* foo@dtpoff(%rX) is ok. */
5484 if (GET_CODE (disp) != CONST
5485 || GET_CODE (XEXP (disp, 0)) != PLUS
5486 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5487 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5488 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5489 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5491 reason = "non-constant pic memory reference";
5495 else if (! legitimate_pic_address_disp_p (disp))
5497 reason = "displacement is an invalid pic construct";
/* This code used to verify that a symbolic pic displacement
   includes the pic_offset_table_rtx register.

   While this is a good idea, unfortunately these constructs may
   be created by the "adds using lea" optimization for incorrect
   code.

   Such code is nonsensical, but results in addressing the
   GOT table with the pic_offset_table_rtx base.  We can't
   just refuse it easily, since it gets matched by the
   "addsi3" pattern, which later gets split to lea in the
   case the output register differs from the input.  While this
   could be handled by a separate addsi pattern for this case
   that never results in lea, disabling this test seems to be the
   easier and correct fix for the crash.  */
5523 else if (GET_CODE (disp) != LABEL_REF
5524 && GET_CODE (disp) != CONST_INT
5525 && (GET_CODE (disp) != CONST
5526 || !legitimate_constant_p (disp))
5527 && (GET_CODE (disp) != SYMBOL_REF
5528 || !legitimate_constant_p (disp)))
5530 reason = "displacement is not constant";
5533 else if (TARGET_64BIT
5534 && !x86_64_immediate_operand (disp, VOIDmode))
5536 reason = "displacement is out of range";
5541 /* Everything looks valid. */
5542 if (TARGET_DEBUG_ADDR)
5543 fprintf (stderr, "Success.\n");
5547 if (TARGET_DEBUG_ADDR)
5549 fprintf (stderr, "Error: %s\n", reason);
5550 debug_rtx (reason_rtx);
/* Return a unique alias set for the GOT.  */
5557 static HOST_WIDE_INT
5558 ix86_GOT_alias_set (void)
5560 static HOST_WIDE_INT set = -1;
5562 set = new_alias_set ();
5566 /* Return a legitimate reference for ORIG (an address) using the
5567 register REG. If REG is 0, a new pseudo is generated.
5569 There are two types of references that must be handled:
5571 1. Global data references must load the address from the GOT, via
5572 the PIC reg. An insn is emitted to do this load, and the reg is
5575 2. Static data references, constant pool addresses, and code labels
5576 compute the address as an offset from the GOT, whose base is in
5577 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
5578 differentiate them from global data objects. The returned
5579 address is the PIC reg + an unspec constant.
5581 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
5582 reg also appears in the address. */
5585 legitimize_pic_address (rtx orig, rtx reg)
5593 reg = gen_reg_rtx (Pmode);
5594 /* Use the generic Mach-O PIC machinery. */
5595 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
5598 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
5600 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
5602 /* This symbol may be referenced via a displacement from the PIC
5603 base address (@GOTOFF). */
5605 if (reload_in_progress)
5606 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5607 if (GET_CODE (addr) == CONST)
5608 addr = XEXP (addr, 0);
5609 if (GET_CODE (addr) == PLUS)
5611 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5612 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5615 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5616 new = gen_rtx_CONST (Pmode, new);
5617 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5621 emit_move_insn (reg, new);
5625 else if (GET_CODE (addr) == SYMBOL_REF)
5629 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
5630 new = gen_rtx_CONST (Pmode, new);
5631 new = gen_const_mem (Pmode, new);
5632 set_mem_alias_set (new, ix86_GOT_alias_set ());
5635 reg = gen_reg_rtx (Pmode);
/* Use gen_movsi directly; otherwise the address is loaded into a
   register for CSE.  We don't want to CSE these addresses;
   instead we CSE addresses from the GOT table, so skip this.  */
5639 emit_insn (gen_movsi (reg, new));
5644 /* This symbol must be referenced via a load from the
5645 Global Offset Table (@GOT). */
5647 if (reload_in_progress)
5648 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5649 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
5650 new = gen_rtx_CONST (Pmode, new);
5651 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5652 new = gen_const_mem (Pmode, new);
5653 set_mem_alias_set (new, ix86_GOT_alias_set ());
5656 reg = gen_reg_rtx (Pmode);
5657 emit_move_insn (reg, new);
5663 if (GET_CODE (addr) == CONST)
5665 addr = XEXP (addr, 0);
5667 /* We must match stuff we generate before. Assume the only
5668 unspecs that can get here are ours. Not that we could do
5669 anything with them anyway.... */
5670 if (GET_CODE (addr) == UNSPEC
5671 || (GET_CODE (addr) == PLUS
5672 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
5674 gcc_assert (GET_CODE (addr) == PLUS);
5676 if (GET_CODE (addr) == PLUS)
5678 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
5680 /* Check first to see if this is a constant offset from a @GOTOFF
5681 symbol reference. */
5682 if (local_symbolic_operand (op0, Pmode)
5683 && GET_CODE (op1) == CONST_INT)
5687 if (reload_in_progress)
5688 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5689 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
5691 new = gen_rtx_PLUS (Pmode, new, op1);
5692 new = gen_rtx_CONST (Pmode, new);
5693 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5697 emit_move_insn (reg, new);
5703 if (INTVAL (op1) < -16*1024*1024
5704 || INTVAL (op1) >= 16*1024*1024)
5705 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
5710 base = legitimize_pic_address (XEXP (addr, 0), reg);
5711 new = legitimize_pic_address (XEXP (addr, 1),
5712 base == reg ? NULL_RTX : reg);
5714 if (GET_CODE (new) == CONST_INT)
5715 new = plus_constant (base, INTVAL (new));
5718 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
5720 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
5721 new = XEXP (new, 1);
5723 new = gen_rtx_PLUS (Pmode, base, new);
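/* Example (a sketch of the transformations above): with flag_pic on
   ia32, a local symbol S becomes

	(plus pic_offset_table_rtx (const (unspec [S] UNSPEC_GOTOFF)))

   while a global symbol is loaded from its GOT slot:

	(mem (plus pic_offset_table_rtx (const (unspec [S] UNSPEC_GOT))))
*/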
5731 /* Load the thread pointer. If TO_REG is true, force it into a register. */
5734 get_thread_pointer (int to_reg)
5738 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
5742 reg = gen_reg_rtx (Pmode);
5743 insn = gen_rtx_SET (VOIDmode, reg, tp);
5744 insn = emit_insn (insn);
5749 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
5750 false if we expect this to be used for a memory address and true if
5751 we expect to load the address into a register. */
5754 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
5756 rtx dest, base, off, pic;
5761 case TLS_MODEL_GLOBAL_DYNAMIC:
5762 dest = gen_reg_rtx (Pmode);
5765 rtx rax = gen_rtx_REG (Pmode, 0), insns;
5768 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
5769 insns = get_insns ();
5772 emit_libcall_block (insns, dest, rax, x);
5775 emit_insn (gen_tls_global_dynamic_32 (dest, x));
5778 case TLS_MODEL_LOCAL_DYNAMIC:
5779 base = gen_reg_rtx (Pmode);
5782 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
5785 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
5786 insns = get_insns ();
5789 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
5790 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
5791 emit_libcall_block (insns, base, rax, note);
5794 emit_insn (gen_tls_local_dynamic_base_32 (base));
5796 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
5797 off = gen_rtx_CONST (Pmode, off);
5799 return gen_rtx_PLUS (Pmode, base, off);
5801 case TLS_MODEL_INITIAL_EXEC:
5805 type = UNSPEC_GOTNTPOFF;
5809 if (reload_in_progress)
5810 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5811 pic = pic_offset_table_rtx;
5812 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
5814 else if (!TARGET_GNU_TLS)
5816 pic = gen_reg_rtx (Pmode);
5817 emit_insn (gen_set_got (pic));
5818 type = UNSPEC_GOTTPOFF;
5823 type = UNSPEC_INDNTPOFF;
5826 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
5827 off = gen_rtx_CONST (Pmode, off);
5829 off = gen_rtx_PLUS (Pmode, pic, off);
5830 off = gen_const_mem (Pmode, off);
5831 set_mem_alias_set (off, ix86_GOT_alias_set ());
5833 if (TARGET_64BIT || TARGET_GNU_TLS)
5835 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5836 off = force_reg (Pmode, off);
5837 return gen_rtx_PLUS (Pmode, base, off);
5841 base = get_thread_pointer (true);
5842 dest = gen_reg_rtx (Pmode);
5843 emit_insn (gen_subsi3 (dest, base, off));
5847 case TLS_MODEL_LOCAL_EXEC:
5848 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
5849 (TARGET_64BIT || TARGET_GNU_TLS)
5850 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
5851 off = gen_rtx_CONST (Pmode, off);
5853 if (TARGET_64BIT || TARGET_GNU_TLS)
5855 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5856 return gen_rtx_PLUS (Pmode, base, off);
5860 base = get_thread_pointer (true);
5861 dest = gen_reg_rtx (Pmode);
5862 emit_insn (gen_subsi3 (dest, base, off));
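/* For reference, rough ia32 sequences for the models handled above
   under TARGET_GNU_TLS (a sketch; the exact relocations depend on
   the assembler and on flag_pic):

   global dynamic:	leal	x@tlsgd(,%ebx,1), %eax
			call	___tls_get_addr
   initial exec:	movl	%gs:0, %eax
			addl	x@gotntpoff(%ebx), %eax
   local exec:		movl	%gs:0, %eax
			addl	$x@ntpoff, %eax
*/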
5873 /* Try machine-dependent ways of modifying an illegitimate address
5874 to be legitimate. If we find one, return the new, valid address.
5875 This macro is used in only one place: `memory_address' in explow.c.
5877 OLDX is the address as it was before break_out_memory_refs was called.
5878 In some cases it is useful to look at this to decide what needs to be done.
5880 MODE and WIN are passed so that this macro can use
5881 GO_IF_LEGITIMATE_ADDRESS.
5883 It is always safe for this macro to do nothing. It exists to recognize
5884 opportunities to optimize the output.
5886 For the 80386, we handle X+REG by loading X into a register R and
5887 using R+REG. R will go in a general reg and indexing will be used.
5888 However, if REG is a broken-out memory address or multiplication,
5889 nothing needs to be done because REG can certainly go in a general reg.
5891 When -fpic is used, special handling is needed for symbolic references.
5892 See comments by legitimize_pic_address in i386.c for details. */
5895 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
5900 if (TARGET_DEBUG_ADDR)
5902 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
5903 GET_MODE_NAME (mode));
5907 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
5909 return legitimize_tls_address (x, log, false);
5910 if (GET_CODE (x) == CONST
5911 && GET_CODE (XEXP (x, 0)) == PLUS
5912 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5913 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
5915 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
5916 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
5919 if (flag_pic && SYMBOLIC_CONST (x))
5920 return legitimize_pic_address (x, 0);
5922 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
5923 if (GET_CODE (x) == ASHIFT
5924 && GET_CODE (XEXP (x, 1)) == CONST_INT
5925 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
5928 log = INTVAL (XEXP (x, 1));
5929 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
5930 GEN_INT (1 << log));
5933 if (GET_CODE (x) == PLUS)
5935 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
5937 if (GET_CODE (XEXP (x, 0)) == ASHIFT
5938 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
5939 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
5942 log = INTVAL (XEXP (XEXP (x, 0), 1));
5943 XEXP (x, 0) = gen_rtx_MULT (Pmode,
5944 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
5945 GEN_INT (1 << log));
5948 if (GET_CODE (XEXP (x, 1)) == ASHIFT
5949 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5950 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
5953 log = INTVAL (XEXP (XEXP (x, 1), 1));
5954 XEXP (x, 1) = gen_rtx_MULT (Pmode,
5955 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
5956 GEN_INT (1 << log));
5959 /* Put multiply first if it isn't already. */
5960 if (GET_CODE (XEXP (x, 1)) == MULT)
5962 rtx tmp = XEXP (x, 0);
5963 XEXP (x, 0) = XEXP (x, 1);
5968 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
5969 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
5970 created by virtual register instantiation, register elimination, and
5971 similar optimizations. */
5972 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
5975 x = gen_rtx_PLUS (Pmode,
5976 gen_rtx_PLUS (Pmode, XEXP (x, 0),
5977 XEXP (XEXP (x, 1), 0)),
5978 XEXP (XEXP (x, 1), 1));
5982 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
5983 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
5984 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
5985 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5986 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
5987 && CONSTANT_P (XEXP (x, 1)))
5990 rtx other = NULL_RTX;
5992 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5994 constant = XEXP (x, 1);
5995 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
5997 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
5999 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
6000 other = XEXP (x, 1);
6008 x = gen_rtx_PLUS (Pmode,
6009 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
6010 XEXP (XEXP (XEXP (x, 0), 1), 0)),
6011 plus_constant (other, INTVAL (constant)));
6015 if (changed && legitimate_address_p (mode, x, FALSE))
6018 if (GET_CODE (XEXP (x, 0)) == MULT)
6021 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
6024 if (GET_CODE (XEXP (x, 1)) == MULT)
6027 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
6031 && GET_CODE (XEXP (x, 1)) == REG
6032 && GET_CODE (XEXP (x, 0)) == REG)
6035 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
6038 x = legitimize_pic_address (x, 0);
6041 if (changed && legitimate_address_p (mode, x, FALSE))
6044 if (GET_CODE (XEXP (x, 0)) == REG)
6046 rtx temp = gen_reg_rtx (Pmode);
6047 rtx val = force_operand (XEXP (x, 1), temp);
6049 emit_move_insn (temp, val);
6055 else if (GET_CODE (XEXP (x, 1)) == REG)
6057 rtx temp = gen_reg_rtx (Pmode);
6058 rtx val = force_operand (XEXP (x, 0), temp);
6060 emit_move_insn (temp, val);
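/* Example (sketch): (plus (ashift (reg) (const_int 2)) (reg)) is
   canonicalized above into (plus (mult (reg) (const_int 4)) (reg)),
   the index*scale form that ix86_decompose_address and
   GO_IF_LEGITIMATE_ADDRESS expect.  */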
6070 /* Print an integer constant expression in assembler syntax. Addition
6071 and subtraction are the only arithmetic that may appear in these
6072 expressions. FILE is the stdio stream to write to, X is the rtx, and
6073 CODE is the operand print code from the output string. */
6076 output_pic_addr_const (FILE *file, rtx x, int code)
6080 switch (GET_CODE (x))
6083 gcc_assert (flag_pic);
6088 assemble_name (file, XSTR (x, 0));
6089 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
6090 fputs ("@PLT", file);
6097 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
6098 assemble_name (asm_out_file, buf);
6102 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6106 /* This used to output parentheses around the expression,
6107 but that does not work on the 386 (either ATT or BSD assembler). */
6108 output_pic_addr_const (file, XEXP (x, 0), code);
6112 if (GET_MODE (x) == VOIDmode)
6114 /* We can use %d if the number is <32 bits and positive. */
6115 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
6116 fprintf (file, "0x%lx%08lx",
6117 (unsigned long) CONST_DOUBLE_HIGH (x),
6118 (unsigned long) CONST_DOUBLE_LOW (x));
6120 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
6123 /* We can't handle floating point constants;
6124 PRINT_OPERAND must handle them. */
6125 output_operand_lossage ("floating constant misused");
6129 /* Some assemblers need integer constants to appear first. */
6130 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
6132 output_pic_addr_const (file, XEXP (x, 0), code);
6134 output_pic_addr_const (file, XEXP (x, 1), code);
6138 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
6139 output_pic_addr_const (file, XEXP (x, 1), code);
6141 output_pic_addr_const (file, XEXP (x, 0), code);
6147 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
6148 output_pic_addr_const (file, XEXP (x, 0), code);
6150 output_pic_addr_const (file, XEXP (x, 1), code);
6152 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
6156 gcc_assert (XVECLEN (x, 0) == 1);
6157 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
6158 switch (XINT (x, 1))
6161 fputs ("@GOT", file);
6164 fputs ("@GOTOFF", file);
6166 case UNSPEC_GOTPCREL:
6167 fputs ("@GOTPCREL(%rip)", file);
6169 case UNSPEC_GOTTPOFF:
6170 /* FIXME: This might be @TPOFF in Sun ld too. */
6171 fputs ("@GOTTPOFF", file);
6174 fputs ("@TPOFF", file);
6178 fputs ("@TPOFF", file);
6180 fputs ("@NTPOFF", file);
6183 fputs ("@DTPOFF", file);
6185 case UNSPEC_GOTNTPOFF:
6187 fputs ("@GOTTPOFF(%rip)", file);
6189 fputs ("@GOTNTPOFF", file);
6191 case UNSPEC_INDNTPOFF:
6192 fputs ("@INDNTPOFF", file);
6195 output_operand_lossage ("invalid UNSPEC as operand");
6201 output_operand_lossage ("invalid expression as operand");
6205 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6206 We need to emit DTP-relative relocations. */
6209 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6211 fputs (ASM_LONG, file);
6212 output_addr_const (file, x);
6213 fputs ("@DTPOFF", file);
6219 fputs (", 0", file);
6226 /* In the name of slightly smaller debug output, and to cater to
6227 general assembler lossage, recognize PIC+GOTOFF and turn it back
6228 into a direct symbol reference. */
6231 ix86_delegitimize_address (rtx orig_x)
6235 if (GET_CODE (x) == MEM)
6240 if (GET_CODE (x) != CONST
6241 || GET_CODE (XEXP (x, 0)) != UNSPEC
6242 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6243 || GET_CODE (orig_x) != MEM)
6245 return XVECEXP (XEXP (x, 0), 0, 0);
6248 if (GET_CODE (x) != PLUS
6249 || GET_CODE (XEXP (x, 1)) != CONST)
6252 if (GET_CODE (XEXP (x, 0)) == REG
6253 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6254 /* %ebx + GOT/GOTOFF */
6256 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6258 /* %ebx + %reg * scale + GOT/GOTOFF */
6260 if (GET_CODE (XEXP (y, 0)) == REG
6261 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6263 else if (GET_CODE (XEXP (y, 1)) == REG
6264 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6268 if (GET_CODE (y) != REG
6269 && GET_CODE (y) != MULT
6270 && GET_CODE (y) != ASHIFT)
6276 x = XEXP (XEXP (x, 1), 0);
6277 if (GET_CODE (x) == UNSPEC
6278 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6279 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6282 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6283 return XVECEXP (x, 0, 0);
6286 if (GET_CODE (x) == PLUS
6287 && GET_CODE (XEXP (x, 0)) == UNSPEC
6288 && GET_CODE (XEXP (x, 1)) == CONST_INT
6289 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6290 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6291 && GET_CODE (orig_x) != MEM)))
6293 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6295 return gen_rtx_PLUS (Pmode, y, x);
put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,

  if (mode == CCFPmode || mode == CCFPUmode)
      enum rtx_code second_code, bypass_code;
      ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
      gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
      code = ix86_fp_compare_code_to_integer (code);
    code = reverse_condition (code);

      gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
      /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
	 Those same assemblers have the same but opposite lossage on cmov.  */
      gcc_assert (mode == CCmode);
      suffix = fp ? "nbe" : "a";
      gcc_assert (mode == CCmode);
      gcc_assert (mode == CCmode);
      suffix = fp ? "nb" : "ae";
      gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
      gcc_assert (mode == CCmode);
      suffix = fp ? "u" : "p";
      suffix = fp ? "nu" : "np";
  fputs (suffix, file);

/* Print the name of register X to FILE based on its machine mode and number.
   If CODE is 'w', pretend the mode is HImode.
   If CODE is 'b', pretend the mode is QImode.
   If CODE is 'k', pretend the mode is SImode.
   If CODE is 'q', pretend the mode is DImode.
   If CODE is 'h', pretend the reg is the 'high' byte register.
   If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.  */

print_reg (rtx x, int code, FILE *file)
  gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
	      && REGNO (x) != FRAME_POINTER_REGNUM
	      && REGNO (x) != FLAGS_REG
	      && REGNO (x) != FPSR_REG);

  if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)

  if (code == 'w' || MMX_REG_P (x))
  else if (code == 'b')
  else if (code == 'k')
  else if (code == 'q')
  else if (code == 'y')
  else if (code == 'h')
    code = GET_MODE_SIZE (GET_MODE (x));

  /* Irritatingly, AMD extended registers use a different naming convention
     from the normal registers.  */
  if (REX_INT_REG_P (x))
      gcc_assert (TARGET_64BIT);
	  error ("extended registers have no high halves");
	  fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
	  fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
	  fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
	  fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
	  error ("unsupported operand size for extended register");

      if (STACK_TOP_P (x))
	  fputs ("st(0)", file);
      if (! ANY_FP_REG_P (x))
	putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
      fputs (hi_reg_name[REGNO (x)], file);
      if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
      fputs (qi_reg_name[REGNO (x)], file);
      if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
      fputs (qi_high_reg_name[REGNO (x)], file);
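
/* For instance (an illustrative sketch of the mapping above): register
   number 0 prints as "%eax" for code 'k', "%ax" for 'w', "%al" for 'b',
   "%ah" for 'h' and "%rax" for 'q' in AT&T syntax.  */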
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some tls_local_dynamic_base
   pattern.  */

get_some_local_dynamic_name (void)
  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
	&& for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
  if (GET_CODE (x) == SYMBOL_REF
      && local_dynamic_symbolic_operand (x, Pmode))
      cfun->machine->some_ld_name = XSTR (x, 0);

   L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
   C -- print opcode suffix for set/cmov insn.
   c -- like C, but print reversed condition
   F,f -- likewise, but for floating-point.
   O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
	otherwise nothing
   R -- print the prefix for register names.
   z -- print the opcode suffix for the size of the current operand.
   * -- print a star (in certain assembler syntax)
   A -- print an absolute memory reference.
   w -- print the operand as if it's a "word" (HImode) even if it isn't.
   s -- print a shift double count, followed by the assembler's argument
	delimiter.
   b -- print the QImode name of the register for the indicated operand.
	%b0 would print %al if operands[0] is reg 0.
   w -- likewise, print the HImode name of the register.
   k -- likewise, print the SImode name of the register.
   q -- likewise, print the DImode name of the register.
   h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
   y -- print "st(0)" instead of "st" as a register.
   D -- print condition for SSE cmp instruction.
   P -- if PIC, print an @PLT suffix.
   X -- don't print any sort of PIC '@' suffix for a symbol.
   & -- print some in-use local-dynamic symbol name.
   H -- print a memory address offset by 8; used for sse high-parts
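
   As an illustrative example (not a template taken from the machine
   description): with operands[1] a GE comparison, an insn template such
   as "cmov%C1\t{%2, %0|%0, %2}" would print "cmovge" followed by the
   two operands in the order the current assembler dialect expects.  */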
print_operand (FILE *file, rtx x, int code)
      if (ASSEMBLER_DIALECT == ASM_ATT)
	  assemble_name (file, get_some_local_dynamic_name ());
	  switch (ASSEMBLER_DIALECT)
	      /* Intel syntax.  For absolute addresses, registers should not
		 be surrounded by braces.  */
	      if (GET_CODE (x) != REG)
		  PRINT_OPERAND (file, x, 0);
	  PRINT_OPERAND (file, x, 0);
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	  /* 387 opcodes don't get size suffixes if the operands are
	     registers.  */
	  if (STACK_REG_P (x))
	  /* Likewise if using Intel opcodes.  */
	  if (ASSEMBLER_DIALECT == ASM_INTEL)
	  /* Derive the opcode suffix from the size of the operand.  */
	  switch (GET_MODE_SIZE (GET_MODE (x)))
#ifdef HAVE_GAS_FILDS_FISTS
	      if (GET_MODE (x) == SFmode)
	      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
#ifdef GAS_MNEMONICS
	  if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
	      PRINT_OPERAND (file, x, 0);
	  /* A little bit of braindamage here: the SSE compare instructions
	     use completely different names for the comparisons than the fp
	     conditional moves do.  */
	  switch (GET_CODE (x))
	      fputs ("unord", file);
	      fputs ("neq", file);
	      fputs ("nlt", file);
	      fputs ("nle", file);
	      fputs ("ord", file);
#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	      switch (GET_MODE (x))
		case HImode: putc ('w', file); break;
		case SFmode: putc ('l', file); break;
		case DFmode: putc ('q', file); break;
		default: gcc_unreachable ();
	  put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	  put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
	  /* Like above, but reverse condition.  */
	  /* Check to see if the argument to %c is really a constant
	     and not a condition code which needs to be reversed.  */
	  if (!COMPARISON_P (x))
	      output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
	  put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	  put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
	  /* It doesn't actually matter what mode we use here, as we're
	     only going to use this for printing.  */
	  x = adjust_address_nv (x, DImode, 8);
	  if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
	  x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
	      int pred_val = INTVAL (XEXP (x, 0));
	      if (pred_val < REG_BR_PROB_BASE * 45 / 100
		  || pred_val > REG_BR_PROB_BASE * 55 / 100)
		  int taken = pred_val > REG_BR_PROB_BASE / 2;
		  int cputaken = final_forward_branch_p (current_output_insn) == 0;
		  /* Emit hints only when the default branch-prediction
		     heuristics would fail.  */
		  if (taken != cputaken)
		      /* We use 3e (DS) prefix for taken branches and
			 2e (CS) prefix for not taken branches.  */
			fputs ("ds ; ", file);
			fputs ("cs ; ", file);
	  output_operand_lossage ("invalid operand code '%c'", code);

  if (GET_CODE (x) == REG)
    print_reg (x, code, file);
  else if (GET_CODE (x) == MEM)
      /* No `byte ptr' prefix for call instructions.  */
      if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
	  switch (GET_MODE_SIZE (GET_MODE (x)))
	    case 1: size = "BYTE"; break;
	    case 2: size = "WORD"; break;
	    case 4: size = "DWORD"; break;
	    case 8: size = "QWORD"; break;
	    case 12: size = "XWORD"; break;
	    case 16: size = "XMMWORD"; break;
	  /* Check for explicit size override (codes 'b', 'w' and 'k').  */
	  else if (code == 'w')
	  else if (code == 'k')
	  fputs (" PTR ", file);
      /* Avoid (%rip) for call operands.  */
      if (CONSTANT_ADDRESS_P (x) && code == 'P'
	  && GET_CODE (x) != CONST_INT)
	output_addr_const (file, x);
      else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
	output_operand_lossage ("invalid constraints for operand");
  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
      REAL_VALUE_FROM_CONST_DOUBLE (r, x);
      REAL_VALUE_TO_TARGET_SINGLE (r, l);
      if (ASSEMBLER_DIALECT == ASM_ATT)
      fprintf (file, "0x%08lx", l);
  /* These float cases don't actually occur as immediate operands.  */
  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
      real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
      fprintf (file, "%s", dstr);
  else if (GET_CODE (x) == CONST_DOUBLE
	   && GET_MODE (x) == XFmode)
      real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
      fprintf (file, "%s", dstr);

      /* We have patterns that allow zero sets of memory, for instance.
	 In 64-bit mode, we should probably support all 8-byte vectors,
	 since we can in fact encode that into an immediate.  */
      if (GET_CODE (x) == CONST_VECTOR)
	  gcc_assert (x == CONST0_RTX (GET_MODE (x)));
      if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
	  if (ASSEMBLER_DIALECT == ASM_ATT)
      else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
	       || GET_CODE (x) == LABEL_REF)
	  if (ASSEMBLER_DIALECT == ASM_ATT)
	    putc ('$', file);
	  else
	    fputs ("OFFSET FLAT:", file);
      if (GET_CODE (x) == CONST_INT)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
	output_pic_addr_const (file, x, code);
	output_addr_const (file, x);
/* Print a memory operand whose address is ADDR.  */

print_operand_address (FILE *file, rtx addr)
  struct ix86_address parts;
  rtx base, index, disp;

  int ok = ix86_decompose_address (addr, &parts);
  index = parts.index;
  scale = parts.scale;

      if (USER_LABEL_PREFIX[0] == 0)
      fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);

  if (!base && !index)
      /* A displacement-only address requires special attention.  */
      if (GET_CODE (disp) == CONST_INT)
	  if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
	      if (USER_LABEL_PREFIX[0] == 0)
	      fputs ("ds:", file);
	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
	output_pic_addr_const (file, disp, 0);
	output_addr_const (file, disp);

      /* Use RIP-relative addressing, one byte shorter, in 64-bit mode.  */
	  && ((GET_CODE (disp) == SYMBOL_REF
	       && ! tls_symbolic_operand (disp, GET_MODE (disp)))
	      || GET_CODE (disp) == LABEL_REF
	      || (GET_CODE (disp) == CONST
		  && GET_CODE (XEXP (disp, 0)) == PLUS
		  && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
		      || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
		  && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
	fputs ("(%rip)", file);

      if (ASSEMBLER_DIALECT == ASM_ATT)
	    output_pic_addr_const (file, disp, 0);
	  else if (GET_CODE (disp) == LABEL_REF)
	    output_asm_label (disp);
	    output_addr_const (file, disp);
	    print_reg (base, 0, file);
	      print_reg (index, 0, file);
		fprintf (file, ",%d", scale);
	  rtx offset = NULL_RTX;

	  /* Pull out the offset of a symbol; print any symbol itself.  */
	  if (GET_CODE (disp) == CONST
	      && GET_CODE (XEXP (disp, 0)) == PLUS
	      && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
	      offset = XEXP (XEXP (disp, 0), 1);
	      disp = gen_rtx_CONST (VOIDmode,
				    XEXP (XEXP (disp, 0), 0));
	    output_pic_addr_const (file, disp, 0);
	  else if (GET_CODE (disp) == LABEL_REF)
	    output_asm_label (disp);
	  else if (GET_CODE (disp) == CONST_INT)
	    output_addr_const (file, disp);
	    print_reg (base, 0, file);
	      if (INTVAL (offset) >= 0)
	      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
	      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
	      print_reg (index, 0, file);
		fprintf (file, "*%d", scale);
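
/* As a worked example (values chosen for illustration): the address
   base = %ebp, index = %eax, scale = 4, disp = -4 prints as
   "-4(%ebp,%eax,4)" in AT&T syntax and as "[ebp+eax*4-4]" in Intel
   syntax, which is what the two branches above assemble piecemeal.  */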
output_addr_const_extra (FILE *file, rtx x)
  if (GET_CODE (x) != UNSPEC)

  op = XVECEXP (x, 0, 0);
  switch (XINT (x, 1))
    case UNSPEC_GOTTPOFF:
      output_addr_const (file, op);
      /* FIXME: This might be @TPOFF in Sun ld.  */
      fputs ("@GOTTPOFF", file);
      output_addr_const (file, op);
      fputs ("@TPOFF", file);
      output_addr_const (file, op);
	fputs ("@TPOFF", file);
	fputs ("@NTPOFF", file);
      output_addr_const (file, op);
      fputs ("@DTPOFF", file);
    case UNSPEC_GOTNTPOFF:
      output_addr_const (file, op);
	fputs ("@GOTTPOFF(%rip)", file);
	fputs ("@GOTNTPOFF", file);
    case UNSPEC_INDNTPOFF:
      output_addr_const (file, op);
      fputs ("@INDNTPOFF", file);

/* Split one or more DImode RTL references into pairs of SImode
   references.  The RTL can be REG, offsettable MEM, integer constant, or
   CONST_DOUBLE.  "operands" is a pointer to an array of DImode RTL to
   split and "num" is its length.  lo_half and hi_half are output arrays
   that parallel "operands".  */

split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
      rtx op = operands[num];

      /* simplify_subreg refuses to split volatile memory addresses,
	 but we still have to handle them.  */
      if (GET_CODE (op) == MEM)
	  lo_half[num] = adjust_address (op, SImode, 0);
	  hi_half[num] = adjust_address (op, SImode, 4);
	  lo_half[num] = simplify_gen_subreg (SImode, op,
					      GET_MODE (op) == VOIDmode
					      ? DImode : GET_MODE (op), 0);
	  hi_half[num] = simplify_gen_subreg (SImode, op,
					      GET_MODE (op) == VOIDmode
					      ? DImode : GET_MODE (op), 4);
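
/* For instance (a sketch): splitting (reg:DI r) yields
   lo = (subreg:SI (reg:DI r) 0) and hi = (subreg:SI (reg:DI r) 4),
   while a DImode MEM at address A yields SImode MEMs at A and A+4,
   exactly the 0/4 offsets passed above.  */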
/* Split one or more TImode RTL references into pairs of DImode
   references.  The RTL can be REG, offsettable MEM, integer constant, or
   CONST_DOUBLE.  "operands" is a pointer to an array of TImode RTL to
   split and "num" is its length.  lo_half and hi_half are output arrays
   that parallel "operands".  */

split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
      rtx op = operands[num];

      /* simplify_subreg refuses to split volatile memory addresses, but we
	 still have to handle them.  */
      if (GET_CODE (op) == MEM)
	  lo_half[num] = adjust_address (op, DImode, 0);
	  hi_half[num] = adjust_address (op, DImode, 8);
	  lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
	  hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);

/* Output code to perform a 387 binary operation in INSN, one of PLUS,
   MINUS, MULT or DIV.  OPERANDS are the insn operands, where operands[3]
   is the expression of the binary operation.  The output may either be
   emitted here, or returned to the caller, like all output_* functions.

   There is no guarantee that the operands are the same mode, as they
   might be within FLOAT or FLOAT_EXTEND expressions.  */

#ifndef SYSV386_COMPAT
/* Set to 1 for compatibility with brain-damaged assemblers.  No-one
   wants to fix the assemblers because that causes incompatibility
   with gcc.  No-one wants to fix gcc because that causes
   incompatibility with assemblers...  You can use -DSYSV386_COMPAT=0
   if you recompile both gcc and the assembler this way.  */
#define SYSV386_COMPAT 1
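
/* An illustrative sketch of the incompatibility: when the destination is
   not st(0), AT&T-derived assemblers swap the sense of fsub/fsubr (and
   fdiv/fdivr), so for the same hardware operation gcc must emit "fsubr"
   where the Intel assembler expects "fsub".  The {att|intel} templates
   below pick the spelling per SYSV386_COMPAT and per dialect.  */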
output_387_binary_op (rtx insn, rtx *operands)
  static char buf[30];
  int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1])
	       || SSE_REG_P (operands[2]);

#ifdef ENABLE_CHECKING
  /* Even if we do not want to check the inputs, this documents the input
     constraints, which helps in understanding the following code.  */
  if (STACK_REG_P (operands[0])
      && ((REG_P (operands[1])
	   && REGNO (operands[0]) == REGNO (operands[1])
	   && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
	  || (REG_P (operands[2])
	      && REGNO (operands[0]) == REGNO (operands[2])
	      && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
      && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
    gcc_assert (is_sse);

  switch (GET_CODE (operands[3]))
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)

      if (GET_MODE (operands[0]) == SFmode)
	strcat (buf, "ss\t{%2, %0|%0, %2}");
	strcat (buf, "sd\t{%2, %0|%0, %2}");

  switch (GET_CODE (operands[3]))
      if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
	  rtx temp = operands[2];
	  operands[2] = operands[1];

      /* We know operands[0] == operands[1].  */
      if (GET_CODE (operands[2]) == MEM)
      if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
	  if (STACK_TOP_P (operands[0]))
	    /* How is it that we are storing to a dead operand[2]?
	       Well, presumably operands[1] is dead too.  We can't
	       store the result to st(0) as st(0) gets popped on this
	       instruction.  Instead store to operands[2] (which I
	       think has to be st(1)).  st(1) will be popped later.
	       gcc <= 2.8.1 didn't have this check and generated
	       assembly code that the Unixware assembler rejected.  */
	    p = "p\t{%0, %2|%2, %0}";	/* st(1) = st(0) op st(1); pop */
	    p = "p\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0); pop */
      if (STACK_TOP_P (operands[0]))
	p = "\t{%y2, %0|%0, %y2}";	/* st(0) = st(0) op st(r2) */
	p = "\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0) */

      if (GET_CODE (operands[1]) == MEM)
      if (GET_CODE (operands[2]) == MEM)
      if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
	  /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
	     derived assemblers, confusingly reverse the direction of
	     the operation for fsub{r} and fdiv{r} when the
	     destination register is not st(0).  The Intel assembler
	     doesn't have this brain damage.  Read !SYSV386_COMPAT to
	     figure out what the hardware really does.  */
	  if (STACK_TOP_P (operands[0]))
	    p = "{p\t%0, %2|rp\t%2, %0}";
	    p = "{rp\t%2, %0|p\t%0, %2}";
	  if (STACK_TOP_P (operands[0]))
	    /* As above for fmul/fadd, we can't store to st(0).  */
	    p = "rp\t{%0, %2|%2, %0}";	/* st(1) = st(0) op st(1); pop */
	    p = "p\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0); pop */
      if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
	  if (STACK_TOP_P (operands[0]))
	    p = "{rp\t%0, %1|p\t%1, %0}";
	    p = "{p\t%1, %0|rp\t%0, %1}";
	  if (STACK_TOP_P (operands[0]))
	    p = "p\t{%0, %1|%1, %0}";	/* st(1) = st(1) op st(0); pop */
	    p = "rp\t{%1, %0|%0, %1}";	/* st(r2) = st(0) op st(r2); pop */
      if (STACK_TOP_P (operands[0]))
	  if (STACK_TOP_P (operands[1]))
	    p = "\t{%y2, %0|%0, %y2}";	/* st(0) = st(0) op st(r2) */
	    p = "r\t{%y1, %0|%0, %y1}";	/* st(0) = st(r1) op st(0) */
      else if (STACK_TOP_P (operands[1]))
	    p = "{\t%1, %0|r\t%0, %1}";
	    p = "r\t{%1, %0|%0, %1}";	/* st(r2) = st(0) op st(r2) */
	    p = "{r\t%2, %0|\t%0, %2}";
	    p = "\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0) */
/* Return the mode needed for an entity in the optimize_mode_switching pass.  */

ix86_mode_needed (int entity, rtx insn)
  enum attr_i387_cw mode;

  /* The mode UNINITIALIZED is used to store the control word after a
     function call or ASM pattern.  The mode ANY specifies that the
     function has no requirements on the control word and makes no
     changes to the bits we are interested in.  */
      || (NONJUMP_INSN_P (insn)
	  && (asm_noperands (PATTERN (insn)) >= 0
	      || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
    return I387_CW_UNINITIALIZED;

  if (recog_memoized (insn) < 0)
  mode = get_attr_i387_cw (insn);
      if (mode == I387_CW_TRUNC)
      if (mode == I387_CW_FLOOR)
      if (mode == I387_CW_CEIL)
      if (mode == I387_CW_MASK_PM)

/* Output code to initialize the control word copies used by the trunc?f?i
   and rounding patterns.  CURRENT_MODE is set to the current control word,
   while NEW_MODE is set to the new control word.  */

emit_i387_cw_initialization (int mode)
  rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
  rtx reg = gen_reg_rtx (HImode);

  emit_insn (gen_x86_fnstcw_1 (stored_mode));
  emit_move_insn (reg, stored_mode);

  if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
	  /* round toward zero (truncate) */
	  emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
	  slot = SLOT_CW_TRUNC;
	  /* round down toward -oo */
	  emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
	  emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
	  slot = SLOT_CW_FLOOR;
	  /* round up toward +oo */
	  emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
	  emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
	  slot = SLOT_CW_CEIL;
	case I387_CW_MASK_PM:
	  /* mask precision exception for nearbyint() */
	  emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
	  slot = SLOT_CW_MASK_PM;
	  /* round toward zero (truncate) */
	  emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
	  slot = SLOT_CW_TRUNC;
	  /* round down toward -oo */
	  emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
	  slot = SLOT_CW_FLOOR;
	  /* round up toward +oo */
	  emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
	  slot = SLOT_CW_CEIL;
	case I387_CW_MASK_PM:
	  /* mask precision exception for nearbyint() */
	  emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
	  slot = SLOT_CW_MASK_PM;

  gcc_assert (slot < MAX_386_STACK_LOCALS);
  new_mode = assign_386_stack_local (HImode, slot);
  emit_move_insn (new_mode, reg);
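
/* For reference, a sketch of the i387 control word layout relied on
   above: bits 10-11 hold the rounding control (00 = to nearest,
   01 = down, 10 = up, 11 = truncate), hence the 0x0c00 mask and the
   0x0400/0x0800 values for floor/ceil; bit 5 (0x0020) masks the
   precision exception for nearbyint().  */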
/* Output code for INSN to convert a float to a signed int.  OPERANDS
   are the insn operands.  The output may be [HSD]Imode and the input
   operand may be [SDX]Fmode.  */

output_fix_trunc (rtx insn, rtx *operands, int fisttp)
  int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
  int dimode_p = GET_MODE (operands[0]) == DImode;
  int round_mode = get_attr_i387_cw (insn);

  /* Jump through a hoop or two for DImode, since the hardware has no
     non-popping instruction.  We used to do this a different way, but
     that was somewhat fragile and broke with post-reload splitters.  */
  if ((dimode_p || fisttp) && !stack_top_dies)
    output_asm_insn ("fld\t%y1", operands);

  gcc_assert (STACK_TOP_P (operands[1]));
  gcc_assert (GET_CODE (operands[0]) == MEM);

    output_asm_insn ("fisttp%z0\t%0", operands);
      if (round_mode != I387_CW_ANY)
	output_asm_insn ("fldcw\t%3", operands);
      if (stack_top_dies || dimode_p)
	output_asm_insn ("fistp%z0\t%0", operands);
	output_asm_insn ("fist%z0\t%0", operands);
      if (round_mode != I387_CW_ANY)
	output_asm_insn ("fldcw\t%2", operands);
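
/* An illustrative emitted sequence (non-fisttp, DImode destination,
   stack top not dying):
	fld	%st(0)
	fldcw	<new control word>
	fistpll	<dest>
	fldcw	<saved control word>
   i.e. the control-word switch brackets the popping store.  */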
/* Output code for INSN to compare OPERANDS.  EFLAGS_P is 1 when fcomi
   should be used.  UNORDERED_P is true when fucom should be used.  */

output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
  rtx cmp_op0, cmp_op1;
  int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);

      cmp_op0 = operands[0];
      cmp_op1 = operands[1];
      cmp_op0 = operands[1];
      cmp_op1 = operands[2];

      if (GET_MODE (operands[0]) == SFmode)
	  return "ucomiss\t{%1, %0|%0, %1}";
	  return "comiss\t{%1, %0|%0, %1}";
	  return "ucomisd\t{%1, %0|%0, %1}";
	  return "comisd\t{%1, %0|%0, %1}";

  gcc_assert (STACK_TOP_P (cmp_op0));

  stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;

  if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
	  output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
	  return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
	return "ftst\n\tfnstsw\t%0";

  if (STACK_REG_P (cmp_op1)
      && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
      && REGNO (cmp_op1) != FIRST_STACK_REG)
      /* If the top of the 387 stack dies, and the other operand is
	 also a stack register that dies, then this must be an
	 `fcompp' float compare.  */
	  /* There is no double-popping fcomi variant.  Fortunately,
	     eflags is immune to the fstp's cc clobbering.  */
	    output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
	    output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
	  return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
	    return "fucompp\n\tfnstsw\t%0";
	    return "fcompp\n\tfnstsw\t%0";

      /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies.  */
      static const char * const alt[16] =
	"fcom%z2\t%y2\n\tfnstsw\t%0",
	"fcomp%z2\t%y2\n\tfnstsw\t%0",
	"fucom%z2\t%y2\n\tfnstsw\t%0",
	"fucomp%z2\t%y2\n\tfnstsw\t%0",

	"ficom%z2\t%y2\n\tfnstsw\t%0",
	"ficomp%z2\t%y2\n\tfnstsw\t%0",

	"fcomi\t{%y1, %0|%0, %y1}",
	"fcomip\t{%y1, %0|%0, %y1}",
	"fucomi\t{%y1, %0|%0, %y1}",
	"fucomip\t{%y1, %0|%0, %y1}",

      mask  = eflags_p << 3;
      mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
      mask |= unordered_p << 1;
      mask |= stack_top_dies;

      gcc_assert (mask < 16);
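      /* A worked example of the encoding (illustrative): eflags_p = 0,
	 an integer cmp_op1, unordered_p = 0 and a dying stack top give
	 mask = 0b0101 = 5, which selects
	 "ficomp%z2\t%y2\n\tfnstsw\t%0" from the table above.  */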
ix86_output_addr_vec_elt (FILE *file, int value)
  const char *directive = ASM_LONG;

    directive = ASM_QUAD;
  gcc_assert (!TARGET_64BIT);

  fprintf (file, "%s%s%d\n", directive, LPREFIX, value);

ix86_output_addr_diff_elt (FILE *file, int value, int rel)
    fprintf (file, "%s%s%d-%s%d\n",
	     ASM_LONG, LPREFIX, value, LPREFIX, rel);
  else if (HAVE_AS_GOTOFF_IN_DATA)
    fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
  else if (TARGET_MACHO)
      fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
      machopic_output_function_base_name (file);
      fprintf (file, "\n");
    asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
		 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);

/* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
   for the target.  */

ix86_expand_clear (rtx dest)
  /* We play register width games, which are only valid after reload.  */
  gcc_assert (reload_completed);

  /* Avoid HImode and its attendant prefix byte.  */
  if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
    dest = gen_rtx_REG (SImode, REGNO (dest));

  tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);

  /* This predicate should match that for movsi_xor and movdi_xor_rex64.  */
  if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
      rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
      tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
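
/* Concretely (sizes for illustration): clearing %eax this way emits the
   2-byte "xorl %eax, %eax" (which clobbers the flags, hence the CLOBBER
   above) instead of the 5-byte "movl $0, %eax".  */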
/* X is an unchanging MEM.  If it is a constant pool reference, return
   the constant pool rtx, else NULL.  */

maybe_get_pool_constant (rtx x)
  x = ix86_delegitimize_address (XEXP (x, 0));

  if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
    return get_pool_constant (x);

ix86_expand_move (enum machine_mode mode, rtx operands[])
  int strict = (reload_in_progress || reload_completed);
  enum tls_model model;

  if (GET_CODE (op1) == SYMBOL_REF)
      model = SYMBOL_REF_TLS_MODEL (op1);
	  op1 = legitimize_tls_address (op1, model, true);
	  op1 = force_operand (op1, op0);
  else if (GET_CODE (op1) == CONST
	   && GET_CODE (XEXP (op1, 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
      model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
	  rtx addend = XEXP (XEXP (op1, 0), 1);
	  op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
	  op1 = force_operand (op1, NULL);
	  op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
				     op0, 1, OPTAB_DIRECT);

  if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
	  rtx temp = ((reload_in_progress
		       || ((op0 && GET_CODE (op0) == REG)
		      ? op0 : gen_reg_rtx (Pmode));
	  op1 = machopic_indirect_data_reference (op1, temp);
	  op1 = machopic_legitimize_pic_address (op1, mode,
						 temp == op1 ? 0 : temp);
      else if (MACHOPIC_INDIRECT)
	op1 = machopic_indirect_data_reference (op1, 0);
	  if (GET_CODE (op0) == MEM)
	    op1 = force_reg (Pmode, op1);
	    op1 = legitimize_address (op1, op1, Pmode);
#endif /* TARGET_MACHO */
      if (GET_CODE (op0) == MEM
	  && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
	      || !push_operand (op0, mode))
	  && GET_CODE (op1) == MEM)
	op1 = force_reg (mode, op1);

      if (push_operand (op0, mode)
	  && ! general_no_elim_operand (op1, mode))
	op1 = copy_to_mode_reg (mode, op1);

      /* Force large constants in 64-bit compilation into registers
	 to get them CSEed.  */
      if (TARGET_64BIT && mode == DImode
	  && immediate_operand (op1, mode)
	  && !x86_64_zext_immediate_operand (op1, VOIDmode)
	  && !register_operand (op0, mode)
	  && optimize && !reload_completed && !reload_in_progress)
	op1 = copy_to_mode_reg (mode, op1);

      if (FLOAT_MODE_P (mode))
	  /* If we are loading a floating point constant to a register,
	     force the value to memory now, since we'll get better code
	     out the back end.  */
	  else if (GET_CODE (op1) == CONST_DOUBLE)
	      op1 = validize_mem (force_const_mem (mode, op1));
	      if (!register_operand (op0, mode))
		  rtx temp = gen_reg_rtx (mode);
		  emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
		  emit_move_insn (op0, temp);

  emit_insn (gen_rtx_SET (VOIDmode, op0, op1));

ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
  rtx op0 = operands[0], op1 = operands[1];

  /* Force constants other than zero into memory.  We do not know how
     the instructions used to build constants modify the upper 64 bits
     of the register; once we have that information we may be able
     to handle some of them more efficiently.  */
  if ((reload_in_progress | reload_completed) == 0
      && register_operand (op0, mode)
      && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
    op1 = validize_mem (force_const_mem (mode, op1));

  /* Make operand1 a register if it isn't already.  */
      && !register_operand (op0, mode)
      && !register_operand (op1, mode))
      emit_move_insn (op0, force_reg (GET_MODE (op0), op1));

  emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
/* Implement the movmisalign patterns for SSE.  Non-SSE modes go
   straight to ix86_expand_vector_move.  */

ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
      /* If we're optimizing for size, movups is the smallest.  */
	  op0 = gen_lowpart (V4SFmode, op0);
	  op1 = gen_lowpart (V4SFmode, op1);
	  emit_insn (gen_sse_movups (op0, op1));

      /* ??? If we have typed data, then it would appear that using
	 movdqu is the only way to get unaligned data loaded with
	 integer registers.  */
      if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	  op0 = gen_lowpart (V16QImode, op0);
	  op1 = gen_lowpart (V16QImode, op1);
	  emit_insn (gen_sse2_movdqu (op0, op1));

      if (TARGET_SSE2 && mode == V2DFmode)
	  /* When SSE registers are split into halves, we can avoid
	     writing to the top half twice.  */
	  if (TARGET_SSE_SPLIT_REGS)
	      emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
	      /* ??? Not sure about the best option for the Intel chips.
		 The following would seem to satisfy; the register is
		 entirely cleared, breaking the dependency chain.  We
		 then store to the upper half, with a dependency depth
		 of one.  A rumor has it that Intel recommends two movsd
		 followed by an unpacklpd, but this is unconfirmed.  And
		 given that the dependency depth of the unpacklpd would
		 still be one, I'm not sure why this would be better.  */
	      zero = CONST0_RTX (V2DFmode);

	  m = adjust_address (op1, DFmode, 0);
	  emit_insn (gen_sse2_loadlpd (op0, zero, m));
	  m = adjust_address (op1, DFmode, 8);
	  emit_insn (gen_sse2_loadhpd (op0, op0, m));
	  if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
	    emit_move_insn (op0, CONST0_RTX (mode));
	    emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));

	  if (mode != V4SFmode)
	    op0 = gen_lowpart (V4SFmode, op0);
	  m = adjust_address (op1, V2SFmode, 0);
	  emit_insn (gen_sse_loadlps (op0, op0, m));
	  m = adjust_address (op1, V2SFmode, 8);
	  emit_insn (gen_sse_loadhps (op0, op0, m));
  else if (MEM_P (op0))
      /* If we're optimizing for size, movups is the smallest.  */
	  op0 = gen_lowpart (V4SFmode, op0);
	  op1 = gen_lowpart (V4SFmode, op1);
	  emit_insn (gen_sse_movups (op0, op1));

      /* ??? Similar to the load case above, only less clear because of
	 "typeless stores".  */
      if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
	  && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	  op0 = gen_lowpart (V16QImode, op0);
	  op1 = gen_lowpart (V16QImode, op1);
	  emit_insn (gen_sse2_movdqu (op0, op1));

      if (TARGET_SSE2 && mode == V2DFmode)
	  m = adjust_address (op0, DFmode, 0);
	  emit_insn (gen_sse2_storelpd (m, op1));
	  m = adjust_address (op0, DFmode, 8);
	  emit_insn (gen_sse2_storehpd (m, op1));
	  if (mode != V4SFmode)
	    op1 = gen_lowpart (V4SFmode, op1);
	  m = adjust_address (op0, V2SFmode, 0);
	  emit_insn (gen_sse_storelps (m, op1));
	  m = adjust_address (op0, V2SFmode, 8);
	  emit_insn (gen_sse_storehps (m, op1));

/* Expand a push in MODE.  This is some mode for which we do not support
   proper push instructions, at least from the registers that we expect
   the value to live in.  */

ix86_expand_push (enum machine_mode mode, rtx x)
  tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
			     GEN_INT (-GET_MODE_SIZE (mode)),
			     stack_pointer_rtx, 1, OPTAB_DIRECT);
  if (tmp != stack_pointer_rtx)
    emit_move_insn (stack_pointer_rtx, tmp);

  tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
  emit_move_insn (tmp, x);
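
/* In effect this emits (a sketch, operand names illustrative):
	sub	$<size>, %esp
	mov	<x>, (%esp)
   i.e. an explicit stack-pointer adjustment followed by a plain store,
   standing in for the push instruction the mode lacks.  */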
/* Fix up OPERANDS to satisfy ix86_binary_operator_ok.  Return the
   destination to use for the operation.  If different from the true
   destination in operands[0], a copy operation will be required.  */

ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
  int matching_memory;
  rtx src1, src2, dst;

  /* Recognize <var1> = <value> <op> <var1> for commutative operators.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && (rtx_equal_p (dst, src2)
	  || immediate_operand (src1, mode)))

  /* If the destination is memory, and we do not have matching source
     operands, do things in registers.  */
  matching_memory = 0;
  if (GET_CODE (dst) == MEM)
      if (rtx_equal_p (dst, src1))
	matching_memory = 1;
      else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
	       && rtx_equal_p (dst, src2))
	matching_memory = 2;
	dst = gen_reg_rtx (mode);

  /* Both source operands cannot be in memory.  */
  if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
      if (matching_memory != 2)
	src2 = force_reg (mode, src2);
	src1 = force_reg (mode, src1);

  /* If the operation is not commutative, source 1 cannot be a constant
     or non-matching memory.  */
  if ((CONSTANT_P (src1)
       || (!matching_memory && GET_CODE (src1) == MEM))
      && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
    src1 = force_reg (mode, src1);

  /* If optimizing, copy to regs to improve CSE.  */
  if (optimize && ! no_new_pseudos)
      if (GET_CODE (dst) == MEM)
	dst = gen_reg_rtx (mode);
      if (GET_CODE (src1) == MEM)
	src1 = force_reg (mode, src1);
      if (GET_CODE (src2) == MEM)
	src2 = force_reg (mode, src2);

  src1 = operands[1] = src1;
  src2 = operands[2] = src2;

/* Similarly, but assume that the destination has already been
   set up.  */

ix86_fixup_binary_operands_no_copy (enum rtx_code code,
				    enum machine_mode mode, rtx operands[])
  rtx dst = ix86_fixup_binary_operands (code, mode, operands);
  gcc_assert (dst == operands[0]);

/* Attempt to expand a binary operator.  Make the expansion closer to the
   actual machine than just general_operand, which will allow 3 separate
   memory references (one output, two input) in a single insn.  */

ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
  rtx src1, src2, dst, op, clob;

  dst = ix86_fixup_binary_operands (code, mode, operands);

  /* Emit the instruction.  */
  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
  if (reload_in_progress)
      /* Reload doesn't know about the flags register, and doesn't know that
	 it doesn't want to clobber it.  We can only do this with PLUS.  */
      gcc_assert (code == PLUS);
  clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], dst);
/* Return TRUE or FALSE depending on whether the binary operator meets the
   appropriate constraints.  */

ix86_binary_operator_ok (enum rtx_code code,
			 enum machine_mode mode ATTRIBUTE_UNUSED,
  /* Both source operands cannot be in memory.  */
  if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
  /* If the operation is not commutative, source 1 cannot be a constant.  */
  if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
  /* If the destination is memory, we must have a matching source operand.  */
  if (GET_CODE (operands[0]) == MEM
      && ! (rtx_equal_p (operands[0], operands[1])
	    || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
		&& rtx_equal_p (operands[0], operands[2]))))
  /* If the operation is not commutative and source 1 is memory, we must
     have a matching destination.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && ! rtx_equal_p (operands[0], operands[1]))

/* Attempt to expand a unary operator.  Make the expansion closer to the
   actual machine than just general_operand, which will allow 2 separate
   memory references (one output, one input) in a single insn.  */

ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
  int matching_memory;
  rtx src, dst, op, clob;

  /* If the destination is memory, and we do not have matching source
     operands, do things in registers.  */
  matching_memory = 0;
      if (rtx_equal_p (dst, src))
	matching_memory = 1;
	dst = gen_reg_rtx (mode);

  /* When the source operand is memory, the destination must match.  */
  if (MEM_P (src) && !matching_memory)
    src = force_reg (mode, src);

  /* If optimizing, copy to regs to improve CSE.  */
  if (optimize && ! no_new_pseudos)
      if (GET_CODE (dst) == MEM)
	dst = gen_reg_rtx (mode);
      if (GET_CODE (src) == MEM)
	src = force_reg (mode, src);

  /* Emit the instruction.  */
  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
  if (reload_in_progress || code == NOT)
      /* Reload doesn't know about the flags register, and doesn't know that
	 it doesn't want to clobber it.  */
      gcc_assert (code == NOT);
  clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], dst);

/* Return TRUE or FALSE depending on whether the unary operator meets the
   appropriate constraints.  */

ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
			enum machine_mode mode ATTRIBUTE_UNUSED,
			rtx operands[2] ATTRIBUTE_UNUSED)
  /* If one of the operands is memory, source and destination must match.  */
  if ((GET_CODE (operands[0]) == MEM
       || GET_CODE (operands[1]) == MEM)
      && ! rtx_equal_p (operands[0], operands[1]))

/* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
   Create a mask for the sign bit in MODE for an SSE register.  If VECT is
   true, then replicate the mask for all elements of the vector register.
   If INVERT is true, then create a mask excluding the sign bit.  */

ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
  enum machine_mode vec_mode;
  HOST_WIDE_INT hi, lo;

  /* Find the sign bit, sign extended to 2*HWI.  */
    lo = 0x80000000, hi = lo < 0;
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    lo = (HOST_WIDE_INT)1 << shift, hi = -1;
    lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);

  /* Force this value into the low part of a fp vector constant.  */
  mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
  mask = gen_lowpart (mode, mask);

	v = gen_rtvec (4, mask, mask, mask, mask);
	v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
		       CONST0_RTX (SFmode), CONST0_RTX (SFmode));
      vec_mode = V4SFmode;
	v = gen_rtvec (2, mask, mask);
	v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
      vec_mode = V2DFmode;

  return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
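
/* For concreteness: the SFmode mask is 0x80000000 (bit 31) and the
   DFmode mask is bit 63; with INVERT the SFmode mask becomes
   0x7fffffff, which clears the sign bit (used for ABS).  An
   illustrative restatement of the constants built above.  */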
/* Generate code for floating point ABS or NEG.  */

ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
  rtx mask, set, use, clob, dst, src;
  bool matching_memory;
  bool use_sse = false;
  bool vector_mode = VECTOR_MODE_P (mode);
  enum machine_mode elt_mode = mode;

      elt_mode = GET_MODE_INNER (mode);
  else if (TARGET_SSE_MATH)
    use_sse = SSE_FLOAT_MODE_P (mode);

  /* NEG and ABS performed with SSE use bitwise mask operations.
     Create the appropriate mask now.  */
    mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
    /* When not using SSE, we don't use the mask, but prefer to keep the
       same general form of the insn pattern to reduce duplication when
       it comes time to split.  */

  /* If the destination is memory, and we don't have matching source
     operands, do things in registers.  */
  matching_memory = false;
      if (rtx_equal_p (dst, src) && (!optimize || no_new_pseudos))
	matching_memory = true;
	dst = gen_reg_rtx (mode);

  if (MEM_P (src) && !matching_memory)
    src = force_reg (mode, src);

      set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
      set = gen_rtx_SET (VOIDmode, dst, set);
      set = gen_rtx_fmt_e (code, mode, src);
      set = gen_rtx_SET (VOIDmode, dst, set);
      use = gen_rtx_USE (VOIDmode, mask);
      clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));

  if (dst != operands[0])
    emit_move_insn (operands[0], dst);

/* Expand a copysign operation.  Special case operand 0 being a constant.  */

ix86_expand_copysign (rtx operands[])
  enum machine_mode mode, vmode;
  rtx dest, op0, op1, mask, nmask;

  mode = GET_MODE (dest);
  vmode = mode == SFmode ? V4SFmode : V2DFmode;

  if (GET_CODE (op0) == CONST_DOUBLE)
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);

      if (op0 == CONST0_RTX (mode))
	op0 = CONST0_RTX (vmode);
	    v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
			   CONST0_RTX (SFmode), CONST0_RTX (SFmode));
	    v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
	  op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));

      mask = ix86_build_signbit_mask (mode, 0, 0);

      if (mode == SFmode)
	emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
      else
	emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
      nmask = ix86_build_signbit_mask (mode, 0, 1);
      mask = ix86_build_signbit_mask (mode, 0, 0);

      if (mode == SFmode)
	emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
      else
	emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
/* Deconstruct a copysign operation into bit masks.  Operand 0 is known to
   be a constant, and so has already been expanded into a vector constant.  */

ix86_split_copysign_const (rtx operands[])
  enum machine_mode mode, vmode;
  rtx dest, op0, op1, mask, x;

  mode = GET_MODE (dest);
  vmode = GET_MODE (mask);

  dest = simplify_gen_subreg (vmode, dest, mode, 0);
  x = gen_rtx_AND (vmode, dest, mask);
  emit_insn (gen_rtx_SET (VOIDmode, dest, x));

  if (op0 != CONST0_RTX (vmode))
      x = gen_rtx_IOR (vmode, dest, op0);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));

/* Deconstruct a copysign operation into bit masks.  Operand 0 is variable,
   so we have to do two masks.  */

ix86_split_copysign_var (rtx operands[])
  enum machine_mode mode, vmode;
  rtx dest, scratch, op0, op1, mask, nmask, x;

  scratch = operands[1];
  nmask = operands[4];

  mode = GET_MODE (dest);
  vmode = GET_MODE (mask);

  if (rtx_equal_p (op0, op1))
      /* Shouldn't happen often (it's useless, obviously), but when it does
	 we'd generate incorrect code if we continue below.  */
      emit_move_insn (dest, op0);

  if (REG_P (mask) && REGNO (dest) == REGNO (mask))	/* alternative 0 */
      gcc_assert (REGNO (op1) == REGNO (scratch));

      x = gen_rtx_AND (vmode, scratch, mask);
      emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

      op0 = simplify_gen_subreg (vmode, op0, mode, 0);
      x = gen_rtx_NOT (vmode, dest);
      x = gen_rtx_AND (vmode, x, op0);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
      if (REGNO (op1) == REGNO (scratch))		/* alternative 1,3 */
	  x = gen_rtx_AND (vmode, scratch, mask);
      else						/* alternative 2,4 */
	  gcc_assert (REGNO (mask) == REGNO (scratch));
	  op1 = simplify_gen_subreg (vmode, op1, mode, 0);
	  x = gen_rtx_AND (vmode, scratch, op1);
      emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

      if (REGNO (op0) == REGNO (dest))			/* alternative 1,2 */
	  dest = simplify_gen_subreg (vmode, op0, mode, 0);
	  x = gen_rtx_AND (vmode, dest, nmask);
      else						/* alternative 3,4 */
	  gcc_assert (REGNO (nmask) == REGNO (dest));
	  op0 = simplify_gen_subreg (vmode, op0, mode, 0);
	  x = gen_rtx_AND (vmode, dest, op0);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));

      x = gen_rtx_IOR (vmode, dest, scratch);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
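
/* In bit terms, both splitters above compute
     dest = (op0 & ~signmask) | (op1 & signmask)
   i.e. the magnitude of op0 combined with the sign of op1, with the
   register-allocation alternatives only changing where each AND lands.  */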
/* Return TRUE or FALSE depending on whether the first SET in INSN
   has source and destination with matching CC modes, and whether the
   CC mode is at least as constrained as REQ_MODE.  */

ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
  enum machine_mode set_mode;

  set = PATTERN (insn);
  if (GET_CODE (set) == PARALLEL)
    set = XVECEXP (set, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);

  set_mode = GET_MODE (SET_DEST (set));
      if (req_mode != CCNOmode
	  && (req_mode != CCmode
	      || XEXP (SET_SRC (set), 1) != const0_rtx))
      if (req_mode == CCGCmode)
      if (req_mode == CCGOCmode || req_mode == CCNOmode)
      if (req_mode == CCZmode)

  return (GET_MODE (SET_SRC (set)) == set_mode);

/* Generate insn patterns to do an integer compare of OPERANDS.  */

ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
  enum machine_mode cmpmode;

  cmpmode = SELECT_CC_MODE (code, op0, op1);
  flags = gen_rtx_REG (cmpmode, FLAGS_REG);

  /* This is very simple, but making the interface the same as in the
     FP case makes the rest of the code easier.  */
  tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
  emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));

  /* Return the test that should be put into the flags user, i.e.
     the bcc, scc, or cmov instruction.  */
  return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
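
/* A sketch of the RTL this produces for (eq a b):
     (set (reg:CCZ flags) (compare:CCZ a b))
   with the returned user test being
     (eq (reg:CCZ flags) (const_int 0)).  */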
/* Figure out whether to use ordered or unordered fp comparisons.
   Return the appropriate mode to use.  */

ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
  /* ??? In order to make all comparisons reversible, we do all comparisons
     non-trapping when compiling for IEEE.  Once gcc is able to distinguish
     all forms of trapping and nontrapping comparisons, we can make inequality
     comparisons trapping again, since it results in better code when using
     FCOM based compares.  */
  return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;

ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    return ix86_fp_compare_mode (code);
      /* Only the zero flag is needed.  */
    case NE:			/* ZF!=0 */
      /* Codes needing the carry flag.  */
    case GEU:			/* CF=0 */
    case GTU:			/* CF=0 & ZF=0 */
    case LTU:			/* CF=1 */
    case LEU:			/* CF=1 | ZF=1 */
      /* Codes possibly doable only with the sign flag when
	 comparing against zero.  */
    case GE:			/* SF=OF or SF=0 */
    case LT:			/* SF<>OF or SF=1 */
      if (op1 == const0_rtx)
      /* For other cases the carry flag is not required.  */
      /* Codes doable only with the sign flag when comparing
	 against zero, but for which we lack a jump instruction,
	 so we must use relational tests against the overflow
	 flag, which thus needs to be zero.  */
    case GT:			/* ZF=0 & SF=OF */
    case LE:			/* ZF=1 | SF<>OF */
      if (op1 == const0_rtx)
      /* The strcmp pattern does (use flags), and combine may ask us
	 for a proper comparison.  */

/* Return the fixed registers used for condition codes.  */

ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)

/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return VOIDmode.  */
static enum machine_mode
ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
  if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)

  if ((m1 == CCGCmode && m2 == CCGOCmode)
      || (m1 == CCGOCmode && m2 == CCGCmode))
      /* These are only compatible with themselves, which we already
	 know.  */

/* Return true if we should use an FCOMI instruction for this fp comparison.  */

ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
  enum rtx_code swapped_code = swap_condition (code);
  return ((ix86_fp_comparison_cost (code)
	   == ix86_fp_comparison_fcomi_cost (code))
	  || (ix86_fp_comparison_cost (swapped_code)
	      == ix86_fp_comparison_fcomi_cost (swapped_code)));

/* Swap, force into registers, or otherwise massage the two operands
   to a fp comparison.  The operands are updated in place; the new
   comparison code is returned.  */

static enum rtx_code
ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
  enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
  rtx op0 = *pop0, op1 = *pop1;
  enum machine_mode op_mode = GET_MODE (op0);
  int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);

  /* All of the unordered compare instructions only work on registers.
     The same is true of the fcomi compare instructions.  The same is
     true of the XFmode compare instructions if not comparing with
     zero (the ftst insn is used in this case).  */
      && (fpcmp_mode == CCFPUmode
	  || (op_mode == XFmode
	      && ! (standard_80387_constant_p (op0) == 1
		    || standard_80387_constant_p (op1) == 1))
	  || ix86_use_fcomi_compare (code)))
      op0 = force_reg (op_mode, op0);
      op1 = force_reg (op_mode, op1);
      /* %%% We only allow op1 in memory; op0 must be st(0).  So swap
	 things around if they appear profitable, otherwise force op0
	 into a register.  */
      if (standard_80387_constant_p (op0) == 0
	  || (GET_CODE (op0) == MEM
	      && ! (standard_80387_constant_p (op1) == 0
		    || GET_CODE (op1) == MEM)))
	  tmp = op0, op0 = op1, op1 = tmp;
	  code = swap_condition (code);

      if (GET_CODE (op0) != REG)
	op0 = force_reg (op_mode, op0);

      if (CONSTANT_P (op1))
	  int tmp = standard_80387_constant_p (op1);
	    op1 = validize_mem (force_const_mem (op_mode, op1));
	      op1 = force_reg (op_mode, op1);
	op1 = force_reg (op_mode, op1);

  /* Try to rearrange the comparison to make it cheaper.  */
  if (ix86_fp_comparison_cost (code)
      > ix86_fp_comparison_cost (swap_condition (code))
      && (GET_CODE (op1) == REG || !no_new_pseudos))
      tmp = op0, op0 = op1, op1 = tmp;
      code = swap_condition (code);
      if (GET_CODE (op0) != REG)
	op0 = force_reg (op_mode, op0);

/* Convert the comparison codes we use to represent FP comparisons to
   an integer code that will result in a proper branch.  Return UNKNOWN
   if no such code is available.  */

ix86_fp_compare_code_to_integer (enum rtx_code code)
/* Split comparison code CODE into comparisons we can do using branch
   instructions.  BYPASS_CODE is the comparison code for the branch that
   will branch around FIRST_CODE and SECOND_CODE.  If one of the branches
   is not required, its code is set to UNKNOWN.
   We never require more than two branches.  */

ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
			  enum rtx_code *first_code,
			  enum rtx_code *second_code)
  *bypass_code = UNKNOWN;
  *second_code = UNKNOWN;

  /* The fcomi comparison sets flags as follows:

     cmp    ZF PF CF
     >      0  0  0
     <      0  0  1
     =      1  0  0
     un     1  1  1 */

    case GT:			/* GTU - CF=0 & ZF=0 */
    case GE:			/* GEU - CF=0 */
    case ORDERED:		/* PF=0 */
    case UNORDERED:		/* PF=1 */
    case UNEQ:			/* EQ - ZF=1 */
    case UNLT:			/* LTU - CF=1 */
    case UNLE:			/* LEU - CF=1 | ZF=1 */
    case LTGT:			/* EQ - ZF=0 */
    case LT:			/* LTU - CF=1 - fails on unordered */
      *bypass_code = UNORDERED;
    case LE:			/* LEU - CF=1 | ZF=1 - fails on unordered */
      *bypass_code = UNORDERED;
    case EQ:			/* EQ - ZF=1 - fails on unordered */
      *bypass_code = UNORDERED;
    case NE:			/* NE - ZF=0 - fails on unordered */
      *second_code = UNORDERED;
    case UNGE:			/* GEU - CF=0 - fails on unordered */
      *second_code = UNORDERED;
    case UNGT:			/* GTU - CF=0 & ZF=0 - fails on unordered */
      *second_code = UNORDERED;

  if (!TARGET_IEEE_FP)
      *second_code = UNKNOWN;
      *bypass_code = UNKNOWN;

/* Return the cost of a comparison done with fcom + arithmetic operations
   on AX.  All following functions use the number of instructions as a
   cost metric.  In the future this should be tweaked to compute bytes for
   optimize_size and to take into account the performance of various
   instructions on various CPUs.  */

ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
  if (!TARGET_IEEE_FP)
  /* The cost of code output by ix86_expand_fp_compare.  */

/* Return the cost of a comparison done using the fcomi operation.
   See ix86_fp_comparison_arithmetics_cost for the metrics.  */

ix86_fp_comparison_fcomi_cost (enum rtx_code code)
  enum rtx_code bypass_code, first_code, second_code;
  /* Return an arbitrarily high cost when the instruction is not
     supported; this prevents gcc from using it.  */
  ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
  return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;

/* Return the cost of a comparison done using the sahf operation.
   See ix86_fp_comparison_arithmetics_cost for the metrics.  */

ix86_fp_comparison_sahf_cost (enum rtx_code code)
  enum rtx_code bypass_code, first_code, second_code;
  /* Return an arbitrarily high cost when the instruction is not
     preferred; this keeps gcc from using it.  */
  if (!TARGET_USE_SAHF && !optimize_size)
  ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
  return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
9000 /* Compute cost of the comparison done using any method.
9001 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9003 ix86_fp_comparison_cost (enum rtx_code code)
9005 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
9008 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
9009 sahf_cost = ix86_fp_comparison_sahf_cost (code);
9011 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
9012 if (min > sahf_cost)
9014 if (min > fcomi_cost)
9019 /* Generate insn patterns to do a floating point compare of OPERANDS. */
9022 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
9023 rtx *second_test, rtx *bypass_test)
9025 enum machine_mode fpcmp_mode, intcmp_mode;
9027 int cost = ix86_fp_comparison_cost (code);
9028 enum rtx_code bypass_code, first_code, second_code;
9030 fpcmp_mode = ix86_fp_compare_mode (code);
9031 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
9034 *second_test = NULL_RTX;
9036 *bypass_test = NULL_RTX;
9038 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9040 /* Do fcomi/sahf based test when profitable. */
9041 if ((bypass_code == UNKNOWN || bypass_test)
9042 && (second_code == UNKNOWN || second_test)
9043 && ix86_fp_comparison_arithmetics_cost (code) > cost)
9047 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9048 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
9054 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9055 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9057 scratch = gen_reg_rtx (HImode);
9058 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9059 emit_insn (gen_x86_sahf_1 (scratch));
9062 /* The FP codes work out to act like unsigned. */
9063 intcmp_mode = fpcmp_mode;
9065 if (bypass_code != UNKNOWN)
9066 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
9067 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9069 if (second_code != UNKNOWN)
9070 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
9071 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9076 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
9077 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9078 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9080 scratch = gen_reg_rtx (HImode);
9081 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9083 /* In the unordered case, we have to check C2 for NaNs, which
9084 doesn't happen to work out to anything nice combination-wise.
9085 So do some bit twiddling on the value we've got in AH to come
9086 up with an appropriate set of condition codes. */
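/* For reference (a summary added here, not part of the original flow):
   after fnstsw the x87 condition bits land in AH as C0 = 0x01, C2 = 0x04
   and C3 = 0x40, and fcom leaves C3 C2 C0 = 000 for >, 001 for <, 100
   for =, and 111 for unordered.  So the masks used below decode as

	0x45 = C0|C2|C3		all condition bits
	0x44 = C2|C3
	0x05 = C0|C2
	0x40 = C3		equality
	0x04 = C2		unordered (NaN)
	0x01 = C0		less than  */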
9088 intcmp_mode = CCNOmode;
9093 if (code == GT || !TARGET_IEEE_FP)
9095 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9100 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9101 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9102 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
9103 intcmp_mode = CCmode;
9109 if (code == LT && TARGET_IEEE_FP)
9111 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9112 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
9113 intcmp_mode = CCmode;
9118 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
9124 if (code == GE || !TARGET_IEEE_FP)
9126 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
9131 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9132 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9139 if (code == LE && TARGET_IEEE_FP)
9141 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9142 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9143 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9144 intcmp_mode = CCmode;
9149 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9155 if (code == EQ && TARGET_IEEE_FP)
9157 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9158 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9159 intcmp_mode = CCmode;
9164 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9171 if (code == NE && TARGET_IEEE_FP)
9173 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9174 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9180 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9186 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9190 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9199 /* Return the test that should be put into the flags user, i.e.
9200 the bcc, scc, or cmov instruction. */
9201 return gen_rtx_fmt_ee (code, VOIDmode,
9202 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9207 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
9210 op0 = ix86_compare_op0;
9211 op1 = ix86_compare_op1;
9214 *second_test = NULL_RTX;
9216 *bypass_test = NULL_RTX;
9218 if (ix86_compare_emitted)
9220 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
9221 ix86_compare_emitted = NULL_RTX;
9223 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
9224 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9225 second_test, bypass_test);
9227 ret = ix86_expand_int_compare (code, op0, op1);
9232 /* Return true if CODE will result in a nontrivial jump sequence. */
9234 ix86_fp_jump_nontrivial_p (enum rtx_code code)
9236 enum rtx_code bypass_code, first_code, second_code;
9239 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9240 return bypass_code != UNKNOWN || second_code != UNKNOWN;
9244 ix86_expand_branch (enum rtx_code code, rtx label)
9248 switch (GET_MODE (ix86_compare_op0))
9254 tmp = ix86_expand_compare (code, NULL, NULL);
9255 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9256 gen_rtx_LABEL_REF (VOIDmode, label),
9258 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
9267 enum rtx_code bypass_code, first_code, second_code;
9269 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
9272 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9274 /* Check whether we will use the natural sequence with one jump. If
9275 so, we can expand the jump early. Otherwise delay expansion by
9276 creating a compound insn so as not to confuse the optimizers. */
9277 if (bypass_code == UNKNOWN && second_code == UNKNOWN
9280 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
9281 gen_rtx_LABEL_REF (VOIDmode, label),
9282 pc_rtx, NULL_RTX, NULL_RTX);
9286 tmp = gen_rtx_fmt_ee (code, VOIDmode,
9287 ix86_compare_op0, ix86_compare_op1);
9288 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9289 gen_rtx_LABEL_REF (VOIDmode, label),
9291 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
9293 use_fcomi = ix86_use_fcomi_compare (code);
9294 vec = rtvec_alloc (3 + !use_fcomi);
9295 RTVEC_ELT (vec, 0) = tmp;
9297 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
9299 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
9302 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
9304 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
9312 /* Expand DImode branch into multiple compare+branch. */
9314 rtx lo[2], hi[2], label2;
9315 enum rtx_code code1, code2, code3;
9317 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
9319 tmp = ix86_compare_op0;
9320 ix86_compare_op0 = ix86_compare_op1;
9321 ix86_compare_op1 = tmp;
9322 code = swap_condition (code);
9324 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
9325 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
9327 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
9328 avoid two branches. This costs one extra insn, so disable when
9329 optimizing for size. */
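/* Illustrative sketch (not literal output): "a == b" on a 32-bit target
   becomes roughly

	movl	hi0, %eax
	xorl	hi1, %eax
	movl	lo0, %edx
	xorl	lo1, %edx
	orl	%edx, %eax
	jz	label

   i.e. one conditional branch instead of two.  */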
9331 if ((code == EQ || code == NE)
9333 || hi[1] == const0_rtx || lo[1] == const0_rtx))
9338 if (hi[1] != const0_rtx)
9339 xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
9340 NULL_RTX, 0, OPTAB_WIDEN);
9343 if (lo[1] != const0_rtx)
9344 xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
9345 NULL_RTX, 0, OPTAB_WIDEN);
9347 tmp = expand_binop (SImode, ior_optab, xor1, xor0,
9348 NULL_RTX, 0, OPTAB_WIDEN);
9350 ix86_compare_op0 = tmp;
9351 ix86_compare_op1 = const0_rtx;
9352 ix86_expand_branch (code, label);
9356 /* Otherwise, if we are doing a less-than or greater-or-equal comparison,
9357 op1 is a constant, and the low word is zero, then we can just
9358 examine the high word. */
9360 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
9363 case LT: case LTU: case GE: case GEU:
9364 ix86_compare_op0 = hi[0];
9365 ix86_compare_op1 = hi[1];
9366 ix86_expand_branch (code, label);
9372 /* Otherwise, we need two or three jumps. */
9374 label2 = gen_label_rtx ();
9377 code2 = swap_condition (code);
9378 code3 = unsigned_condition (code);
9382 case LT: case GT: case LTU: case GTU:
9385 case LE: code1 = LT; code2 = GT; break;
9386 case GE: code1 = GT; code2 = LT; break;
9387 case LEU: code1 = LTU; code2 = GTU; break;
9388 case GEU: code1 = GTU; code2 = LTU; break;
9390 case EQ: code1 = UNKNOWN; code2 = NE; break;
9391 case NE: code2 = UNKNOWN; break;
9399 * if (hi(a) < hi(b)) goto true;
9400 * if (hi(a) > hi(b)) goto false;
9401 * if (lo(a) < lo(b)) goto true;
9405 ix86_compare_op0 = hi[0];
9406 ix86_compare_op1 = hi[1];
9408 if (code1 != UNKNOWN)
9409 ix86_expand_branch (code1, label);
9410 if (code2 != UNKNOWN)
9411 ix86_expand_branch (code2, label2);
9413 ix86_compare_op0 = lo[0];
9414 ix86_compare_op1 = lo[1];
9415 ix86_expand_branch (code3, label);
9417 if (code2 != UNKNOWN)
9418 emit_label (label2);
9427 /* Split branch based on floating point condition. */
9429 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
9430 rtx target1, rtx target2, rtx tmp, rtx pushed)
9433 rtx label = NULL_RTX;
9435 int bypass_probability = -1, second_probability = -1, probability = -1;
9438 if (target2 != pc_rtx)
9441 code = reverse_condition_maybe_unordered (code);
9446 condition = ix86_expand_fp_compare (code, op1, op2,
9447 tmp, &second, &bypass);
9449 /* Remove pushed operand from stack. */
9451 ix86_free_from_memory (GET_MODE (pushed));
9453 if (split_branch_probability >= 0)
9455 /* Distribute the probabilities across the jumps.
9456 Assume the BYPASS and SECOND jumps to always be a test for UNORDERED. */
9458 probability = split_branch_probability;
9460 /* A value of 1 is low enough that the probability need not
9461 be updated. Later we may run some experiments and see
9462 if unordered values are more frequent in practice. */
9464 bypass_probability = 1;
9466 second_probability = 1;
9468 if (bypass != NULL_RTX)
9470 label = gen_label_rtx ();
9471 i = emit_jump_insn (gen_rtx_SET
9473 gen_rtx_IF_THEN_ELSE (VOIDmode,
9475 gen_rtx_LABEL_REF (VOIDmode,
9478 if (bypass_probability >= 0)
9480 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9481 GEN_INT (bypass_probability),
9484 i = emit_jump_insn (gen_rtx_SET
9486 gen_rtx_IF_THEN_ELSE (VOIDmode,
9487 condition, target1, target2)));
9488 if (probability >= 0)
9490 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9491 GEN_INT (probability),
9493 if (second != NULL_RTX)
9495 i = emit_jump_insn (gen_rtx_SET
9497 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
9499 if (second_probability >= 0)
9501 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9502 GEN_INT (second_probability),
9505 if (label != NULL_RTX)
9510 ix86_expand_setcc (enum rtx_code code, rtx dest)
9512 rtx ret, tmp, tmpreg, equiv;
9513 rtx second_test, bypass_test;
9515 if (GET_MODE (ix86_compare_op0) == DImode
9517 return 0; /* FAIL */
9519 gcc_assert (GET_MODE (dest) == QImode);
9521 ret = ix86_expand_compare (code, &second_test, &bypass_test);
9522 PUT_MODE (ret, QImode);
9527 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
9528 if (bypass_test || second_test)
9530 rtx test = second_test;
9532 rtx tmp2 = gen_reg_rtx (QImode);
9535 gcc_assert (!second_test);
9538 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
9540 PUT_MODE (test, QImode);
9541 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
9544 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
9546 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
9549 /* Attach a REG_EQUAL note describing the comparison result. */
9550 if (ix86_compare_op0 && ix86_compare_op1)
9552 equiv = simplify_gen_relational (code, QImode,
9553 GET_MODE (ix86_compare_op0),
9554 ix86_compare_op0, ix86_compare_op1);
9555 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
9558 return 1; /* DONE */
9561 /* Expand a comparison setting or clearing the carry flag. Return true when
9562 successful and set *POP to the comparison rtx for the operation. */
9564 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
9566 enum machine_mode mode =
9567 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
9569 /* Do not handle DImode compares that go through the special path. Also we can't
9570 deal with all FP compares yet. This is possible to add. */
9571 if ((mode == DImode && !TARGET_64BIT))
9573 if (FLOAT_MODE_P (mode))
9575 rtx second_test = NULL, bypass_test = NULL;
9576 rtx compare_op, compare_seq;
9578 /* Shortcut: the following common codes never translate into carry flag compares. */
9579 if (code == EQ || code == NE || code == UNEQ || code == LTGT
9580 || code == ORDERED || code == UNORDERED)
9583 /* These comparisons require the zero flag; swap operands so they won't. */
9584 if ((code == GT || code == UNLE || code == LE || code == UNGT)
9590 code = swap_condition (code);
9593 /* Try to expand the comparison and verify that we end up with a carry flag
9594 based comparison. This fails to be true only when we decide to expand the
9595 comparison using arithmetic, which is not a common scenario. */
9597 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9598 &second_test, &bypass_test);
9599 compare_seq = get_insns ();
9602 if (second_test || bypass_test)
9604 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9605 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9606 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
9608 code = GET_CODE (compare_op);
9609 if (code != LTU && code != GEU)
9611 emit_insn (compare_seq);
9615 if (!INTEGRAL_MODE_P (mode))
9623 /* Convert a==0 into (unsigned)a<1. */
9626 if (op1 != const0_rtx)
9629 code = (code == EQ ? LTU : GEU);
9632 /* Convert a>b into b<a or a>=b+1. */
9635 if (GET_CODE (op1) == CONST_INT)
9637 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
9638 /* Bail out on overflow. We could still swap the operands, but that
9639 would force loading the constant into a register. */
9640 if (op1 == const0_rtx
9641 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
9643 code = (code == GTU ? GEU : LTU);
9650 code = (code == GTU ? LTU : GEU);
9654 /* Convert a>=0 into (unsigned)a<0x80000000. */
9657 if (mode == DImode || op1 != const0_rtx)
9659 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9660 code = (code == LT ? GEU : LTU);
9664 if (mode == DImode || op1 != constm1_rtx)
9666 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9667 code = (code == LE ? GEU : LTU);
9673 /* Swapping operands may cause a constant to appear as the first operand. */
9674 if (!nonimmediate_operand (op0, VOIDmode))
9678 op0 = force_reg (mode, op0);
9680 ix86_compare_op0 = op0;
9681 ix86_compare_op1 = op1;
9682 *pop = ix86_expand_compare (code, NULL, NULL);
9683 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
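/* Why LTU/GEU are the codes worth keeping (an illustrative sketch):
   once the test is plain carry, a flags consumer can use sbb directly,
   e.g.

	cmpl	op1, op0	; CF = (op0 <u op1)
	sbbl	%eax, %eax	; %eax = CF ? -1 : 0

   with no setcc and no branch.  */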
9688 ix86_expand_int_movcc (rtx operands[])
9690 enum rtx_code code = GET_CODE (operands[1]), compare_code;
9691 rtx compare_seq, compare_op;
9692 rtx second_test, bypass_test;
9693 enum machine_mode mode = GET_MODE (operands[0]);
9694 bool sign_bit_compare_p = false;
9697 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9698 compare_seq = get_insns ();
9701 compare_code = GET_CODE (compare_op);
9703 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
9704 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
9705 sign_bit_compare_p = true;
9707 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
9708 HImode insns, we'd be swallowed in word prefix ops. */
9710 if ((mode != HImode || TARGET_FAST_PREFIX)
9711 && (mode != DImode || TARGET_64BIT)
9712 && GET_CODE (operands[2]) == CONST_INT
9713 && GET_CODE (operands[3]) == CONST_INT)
9715 rtx out = operands[0];
9716 HOST_WIDE_INT ct = INTVAL (operands[2]);
9717 HOST_WIDE_INT cf = INTVAL (operands[3]);
9721 /* Sign bit compares are better done using shifts than by using sbb. */
9723 if (sign_bit_compare_p
9724 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
9725 ix86_compare_op1, &compare_op))
9727 /* Detect overlap between destination and compare sources. */
9730 if (!sign_bit_compare_p)
9734 compare_code = GET_CODE (compare_op);
9736 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9737 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9740 compare_code = ix86_fp_compare_code_to_integer (compare_code);
9743 /* To simplify the rest of the code, restrict to the GEU case. */
9744 if (compare_code == LTU)
9746 HOST_WIDE_INT tmp = ct;
9749 compare_code = reverse_condition (compare_code);
9750 code = reverse_condition (code);
9755 PUT_CODE (compare_op,
9756 reverse_condition_maybe_unordered
9757 (GET_CODE (compare_op)));
9759 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
9763 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
9764 || reg_overlap_mentioned_p (out, ix86_compare_op1))
9765 tmp = gen_reg_rtx (mode);
9768 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
9770 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
9774 if (code == GT || code == GE)
9775 code = reverse_condition (code);
9778 HOST_WIDE_INT tmp = ct;
9783 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
9784 ix86_compare_op1, VOIDmode, 0, -1);
9797 tmp = expand_simple_binop (mode, PLUS,
9799 copy_rtx (tmp), 1, OPTAB_DIRECT);
9810 tmp = expand_simple_binop (mode, IOR,
9812 copy_rtx (tmp), 1, OPTAB_DIRECT);
9814 else if (diff == -1 && ct)
9824 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9826 tmp = expand_simple_binop (mode, PLUS,
9827 copy_rtx (tmp), GEN_INT (cf),
9828 copy_rtx (tmp), 1, OPTAB_DIRECT);
9836 * andl cf - ct, dest
9846 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9849 tmp = expand_simple_binop (mode, AND,
9851 gen_int_mode (cf - ct, mode),
9852 copy_rtx (tmp), 1, OPTAB_DIRECT);
9854 tmp = expand_simple_binop (mode, PLUS,
9855 copy_rtx (tmp), GEN_INT (ct),
9856 copy_rtx (tmp), 1, OPTAB_DIRECT);
9859 if (!rtx_equal_p (tmp, out))
9860 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
9862 return 1; /* DONE */
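/* Shape of the branch-free sequences built above (a sketch; the
   bracketed insns are emitted only when needed):

	cmpl	op1, op0
	sbbl	%eax, %eax		; 0 or -1 from the carry
	[notl	%eax]
	[andl	$(cf - ct), %eax]
	[addl	$ct, %eax]		; or the addl/orl single-insn variants

   roughly 8-11 bytes and no conditional branch.  */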
9868 tmp = ct, ct = cf, cf = tmp;
9870 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9872 /* We may be reversing an unordered compare to a normal compare, which
9873 is not valid in general (we may convert a non-trapping condition
9874 to a trapping one); however, on i386 we currently emit all
9875 comparisons unordered. */
9876 compare_code = reverse_condition_maybe_unordered (compare_code);
9877 code = reverse_condition_maybe_unordered (code);
9881 compare_code = reverse_condition (compare_code);
9882 code = reverse_condition (code);
9886 compare_code = UNKNOWN;
9887 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
9888 && GET_CODE (ix86_compare_op1) == CONST_INT)
9890 if (ix86_compare_op1 == const0_rtx
9891 && (code == LT || code == GE))
9892 compare_code = code;
9893 else if (ix86_compare_op1 == constm1_rtx)
9897 else if (code == GT)
9902 /* Optimize dest = (op0 < 0) ? -1 : cf. */
9903 if (compare_code != UNKNOWN
9904 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
9905 && (cf == -1 || ct == -1))
9907 /* If the lea code below could be used, only optimize
9908 if it results in a 2-insn sequence. */
9910 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
9911 || diff == 3 || diff == 5 || diff == 9)
9912 || (compare_code == LT && ct == -1)
9913 || (compare_code == GE && cf == -1))
9916 * notl op1 (if necessary)
9924 code = reverse_condition (code);
9927 out = emit_store_flag (out, code, ix86_compare_op0,
9928 ix86_compare_op1, VOIDmode, 0, -1);
9930 out = expand_simple_binop (mode, IOR,
9932 out, 1, OPTAB_DIRECT);
9933 if (out != operands[0])
9934 emit_move_insn (operands[0], out);
9936 return 1; /* DONE */
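/* Sketch of the sequence this path produces (illustrative):

	[notl	op1]		; if the condition had to be reversed
	sarl	$31, op1	; sign bit smeared to 0 or -1
	orl	$cf, op1	; -1 stays -1, 0 becomes cf

   giving dest = (op0 < 0) ? -1 : cf in two or three insns.  */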
9941 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
9942 || diff == 3 || diff == 5 || diff == 9)
9943 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
9945 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
9951 * lea cf(dest*(ct-cf)),dest
9955 * This also catches the degenerate setcc-only case.
9961 out = emit_store_flag (out, code, ix86_compare_op0,
9962 ix86_compare_op1, VOIDmode, 0, 1);
9965 /* On x86_64 the lea instruction operates on Pmode, so we need
9966 the arithmetic done in the proper mode to match. */
9968 tmp = copy_rtx (out);
9972 out1 = copy_rtx (out);
9973 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
9977 tmp = gen_rtx_PLUS (mode, tmp, out1);
9983 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
9986 if (!rtx_equal_p (tmp, out))
9989 out = force_operand (tmp, copy_rtx (out));
9991 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
9993 if (!rtx_equal_p (out, operands[0]))
9994 emit_move_insn (operands[0], copy_rtx (out));
9996 return 1; /* DONE */
10000 * General case:			Jumpful:
10001 *   xorl dest,dest		cmpl op1, op2
10002 *   cmpl op1, op2			movl ct, dest
10003 *   setcc dest			jcc 1f
10004 *   decl dest			movl cf, dest
10005 *   andl (cf-ct),dest		1:
10008 * Size 20.			Size 14.
10010 * This is reasonably steep, but branch mispredict costs are
10011 * high on modern cpus, so consider failing only if optimizing for size.
10015 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10016 && BRANCH_COST >= 2)
10022 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10023 /* We may be reversing an unordered compare to a normal compare,
10024 which is not valid in general (we may convert a non-trapping
10025 condition to a trapping one); however, on i386 we currently
10026 emit all comparisons unordered. */
10027 code = reverse_condition_maybe_unordered (code);
10030 code = reverse_condition (code);
10031 if (compare_code != UNKNOWN)
10032 compare_code = reverse_condition (compare_code);
10036 if (compare_code != UNKNOWN)
10038 /* notl op1 (if needed)
10043 For x < 0 (resp. x <= -1) there will be no notl,
10044 so if possible swap the constants to get rid of the complement.
10046 True/false will be -1/0 while code below (store flag
10047 followed by decrement) is 0/-1, so the constants need
10048 to be exchanged once more. */
10050 if (compare_code == GE || !cf)
10052 code = reverse_condition (code);
10057 HOST_WIDE_INT tmp = cf;
10062 out = emit_store_flag (out, code, ix86_compare_op0,
10063 ix86_compare_op1, VOIDmode, 0, -1);
10067 out = emit_store_flag (out, code, ix86_compare_op0,
10068 ix86_compare_op1, VOIDmode, 0, 1);
10070 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
10071 copy_rtx (out), 1, OPTAB_DIRECT);
10074 out = expand_simple_binop (mode, AND, copy_rtx (out),
10075 gen_int_mode (cf - ct, mode),
10076 copy_rtx (out), 1, OPTAB_DIRECT);
10078 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
10079 copy_rtx (out), 1, OPTAB_DIRECT);
10080 if (!rtx_equal_p (out, operands[0]))
10081 emit_move_insn (operands[0], copy_rtx (out));
10083 return 1; /* DONE */
10087 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10089 /* Try a few more things with specific constants and a variable. */
10092 rtx var, orig_out, out, tmp;
10094 if (BRANCH_COST <= 2)
10095 return 0; /* FAIL */
10097 /* If one of the two operands is an interesting constant, load a
10098 0 or -1 constant with the code above and mask the variable in with a logical operation. */
10100 if (GET_CODE (operands[2]) == CONST_INT)
10103 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
10104 operands[3] = constm1_rtx, op = and_optab;
10105 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
10106 operands[3] = const0_rtx, op = ior_optab;
10108 return 0; /* FAIL */
10110 else if (GET_CODE (operands[3]) == CONST_INT)
10113 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
10114 operands[2] = constm1_rtx, op = and_optab;
10115 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
10116 operands[2] = const0_rtx, op = ior_optab;
10118 return 0; /* FAIL */
10121 return 0; /* FAIL */
10123 orig_out = operands[0];
10124 tmp = gen_reg_rtx (mode);
10127 /* Recurse to get the constant loaded. */
10128 if (ix86_expand_int_movcc (operands) == 0)
10129 return 0; /* FAIL */
10131 /* Mask in the interesting variable. */
10132 out = expand_binop (mode, op, var, tmp, orig_out, 0,
10134 if (!rtx_equal_p (out, orig_out))
10135 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
10137 return 1; /* DONE */
10141 * For comparison with above,
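 *
 *   (sketch of the cmov form, for illustration:)
 *
 *   movl cf, dest
 *   movl ct, tmp
 *   cmpl op1, op2
 *   cmovcc tmp, dest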
10151 if (! nonimmediate_operand (operands[2], mode))
10152 operands[2] = force_reg (mode, operands[2]);
10153 if (! nonimmediate_operand (operands[3], mode))
10154 operands[3] = force_reg (mode, operands[3]);
10156 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10158 rtx tmp = gen_reg_rtx (mode);
10159 emit_move_insn (tmp, operands[3]);
10162 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10164 rtx tmp = gen_reg_rtx (mode);
10165 emit_move_insn (tmp, operands[2]);
10169 if (! register_operand (operands[2], VOIDmode)
10171 || ! register_operand (operands[3], VOIDmode)))
10172 operands[2] = force_reg (mode, operands[2]);
10175 && ! register_operand (operands[3], VOIDmode))
10176 operands[3] = force_reg (mode, operands[3]);
10178 emit_insn (compare_seq);
10179 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10180 gen_rtx_IF_THEN_ELSE (mode,
10181 compare_op, operands[2],
10184 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10185 gen_rtx_IF_THEN_ELSE (mode,
10187 copy_rtx (operands[3]),
10188 copy_rtx (operands[0]))));
10190 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10191 gen_rtx_IF_THEN_ELSE (mode,
10193 copy_rtx (operands[2]),
10194 copy_rtx (operands[0]))));
10196 return 1; /* DONE */
10199 /* Swap, force into registers, or otherwise massage the two operands
10200 to an sse comparison with a mask result. Thus we differ a bit from
10201 ix86_prepare_fp_compare_args which expects to produce a flags result.
10203 The DEST operand exists to help determine whether to commute commutative
10204 operators. The POP0/POP1 operands are updated in place. The new
10205 comparison code is returned, or UNKNOWN if not implementable. */
10207 static enum rtx_code
10208 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
10209 rtx *pop0, rtx *pop1)
10217 /* We have no LTGT as an operator. We could implement it with
10218 NE & ORDERED, but this requires an extra temporary. It's
10219 not clear that it's worth it. */
10226 /* These are supported directly. */
10233 /* For commutative operators, try to canonicalize the destination
10234 operand to be first in the comparison - this helps reload to
10235 avoid extra moves. */
10236 if (!dest || !rtx_equal_p (dest, *pop1))
10244 /* These are not supported directly. Swap the comparison operands
10245 to transform into something that is supported. */
10249 code = swap_condition (code);
10253 gcc_unreachable ();
10259 /* Detect conditional moves that exactly match min/max operational
10260 semantics. Note that this is IEEE safe, as long as we don't
10261 interchange the operands.
10263 Returns FALSE if this conditional move doesn't match a MIN/MAX,
10264 and TRUE if the operation is successful and instructions are emitted. */
10267 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
10268 rtx cmp_op1, rtx if_true, rtx if_false)
10270 enum machine_mode mode;
10276 else if (code == UNGE)
10279 if_true = if_false;
10285 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
10287 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
10292 mode = GET_MODE (dest);
10294 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
10295 but MODE may be a vector mode and thus not appropriate. */
10296 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
10298 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
10301 if_true = force_reg (mode, if_true);
10302 v = gen_rtvec (2, if_true, if_false);
10303 tmp = gen_rtx_UNSPEC (mode, v, u);
10307 code = is_min ? SMIN : SMAX;
10308 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
10311 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
10315 /* Expand an sse vector comparison. Return the register with the result. */
10318 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
10319 rtx op_true, rtx op_false)
10321 enum machine_mode mode = GET_MODE (dest);
10324 cmp_op0 = force_reg (mode, cmp_op0);
10325 if (!nonimmediate_operand (cmp_op1, mode))
10326 cmp_op1 = force_reg (mode, cmp_op1);
10329 || reg_overlap_mentioned_p (dest, op_true)
10330 || reg_overlap_mentioned_p (dest, op_false))
10331 dest = gen_reg_rtx (mode);
10333 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
10334 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10339 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
10340 operations. This is used for both scalar and vector conditional moves. */
10343 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
10345 enum machine_mode mode = GET_MODE (dest);
10348 if (op_false == CONST0_RTX (mode))
10350 op_true = force_reg (mode, op_true);
10351 x = gen_rtx_AND (mode, cmp, op_true);
10352 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10354 else if (op_true == CONST0_RTX (mode))
10356 op_false = force_reg (mode, op_false);
10357 x = gen_rtx_NOT (mode, cmp);
10358 x = gen_rtx_AND (mode, x, op_false);
10359 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10363 op_true = force_reg (mode, op_true);
10364 op_false = force_reg (mode, op_false);
10366 t2 = gen_reg_rtx (mode);
10368 t3 = gen_reg_rtx (mode);
10372 x = gen_rtx_AND (mode, op_true, cmp);
10373 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
10375 x = gen_rtx_NOT (mode, cmp);
10376 x = gen_rtx_AND (mode, x, op_false);
10377 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
10379 x = gen_rtx_IOR (mode, t3, t2);
10380 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
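/* Illustrative shape of the general case above (a sketch in SSE terms):

	t2   = cmp & op_true	; andps
	t3   = ~cmp & op_false	; andnps
	dest = t3 | t2		; orps

   relying on the compare having produced an all-ones / all-zeros mask
   in each element, so exactly one source survives per element.  */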
10384 /* Expand a floating-point conditional move. Return true if successful. */
10387 ix86_expand_fp_movcc (rtx operands[])
10389 enum machine_mode mode = GET_MODE (operands[0]);
10390 enum rtx_code code = GET_CODE (operands[1]);
10391 rtx tmp, compare_op, second_test, bypass_test;
10393 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
10395 enum machine_mode cmode;
10397 /* Since we've no cmove for sse registers, don't force bad register
10398 allocation just to gain access to it. Deny movcc when the
10399 comparison mode doesn't match the move mode. */
10400 cmode = GET_MODE (ix86_compare_op0);
10401 if (cmode == VOIDmode)
10402 cmode = GET_MODE (ix86_compare_op1);
10406 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10408 &ix86_compare_op1);
10409 if (code == UNKNOWN)
10412 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
10413 ix86_compare_op1, operands[2],
10417 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
10418 ix86_compare_op1, operands[2], operands[3]);
10419 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
10423 /* The floating point conditional move instructions don't directly
10424 support conditions resulting from a signed integer comparison. */
10426 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10431 if (!fcmov_comparison_operator (compare_op, VOIDmode))
10433 gcc_assert (!second_test && !bypass_test);
10434 tmp = gen_reg_rtx (QImode);
10435 ix86_expand_setcc (code, tmp);
10437 ix86_compare_op0 = tmp;
10438 ix86_compare_op1 = const0_rtx;
10439 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10441 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10443 tmp = gen_reg_rtx (mode);
10444 emit_move_insn (tmp, operands[3]);
10447 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10449 tmp = gen_reg_rtx (mode);
10450 emit_move_insn (tmp, operands[2]);
10454 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10455 gen_rtx_IF_THEN_ELSE (mode, compare_op,
10456 operands[2], operands[3])));
10458 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10459 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
10460 operands[3], operands[0])));
10462 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10463 gen_rtx_IF_THEN_ELSE (mode, second_test,
10464 operands[2], operands[0])));
10469 /* Expand a floating-point vector conditional move; a vcond operation
10470 rather than a movcc operation. */
10473 ix86_expand_fp_vcond (rtx operands[])
10475 enum rtx_code code = GET_CODE (operands[3]);
10478 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10479 &operands[4], &operands[5]);
10480 if (code == UNKNOWN)
10483 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
10484 operands[5], operands[1], operands[2]))
10487 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
10488 operands[1], operands[2]);
10489 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
10493 /* Expand an integral vector conditional move. */
10496 ix86_expand_int_vcond (rtx operands[], bool unsignedp)
10498 enum machine_mode mode = GET_MODE (operands[0]);
10499 enum rtx_code code = GET_CODE (operands[3]);
10503 code = signed_condition (code);
10504 if (code == NE || code == LE || code == GE)
10506 /* Inverse of a supported code. */
10508 operands[1] = operands[2];
10510 code = reverse_condition (code);
10514 /* Swap of a supported code. */
10516 operands[4] = operands[5];
10518 code = swap_condition (code);
10520 gcc_assert (code == EQ || code == GT);
10522 /* Unlike floating-point, we can rely on the optimizers to have already
10523 converted to MIN/MAX expressions, so we don't have to handle that. */
10525 /* Unsigned GT is not directly supported. We can zero-extend QI and
10526 HImode elements to the next wider element size, use a signed compare,
10527 then repack. For three extra instructions, this is definitely a win. */
10528 if (code == GT && unsignedp)
10530 rtx o0l, o0h, o1l, o1h, cl, ch, zero;
10531 enum machine_mode wider;
10532 rtx (*unpackl) (rtx, rtx, rtx);
10533 rtx (*unpackh) (rtx, rtx, rtx);
10534 rtx (*pack) (rtx, rtx, rtx);
10540 unpackl = gen_sse2_punpcklbw;
10541 unpackh = gen_sse2_punpckhbw;
10542 pack = gen_sse2_packsswb;
10546 unpackl = gen_sse2_punpcklwd;
10547 unpackh = gen_sse2_punpckhwd;
10548 pack = gen_sse2_packssdw;
10551 gcc_unreachable ();
10554 operands[4] = force_reg (mode, operands[4]);
10555 operands[5] = force_reg (mode, operands[5]);
10557 o0l = gen_reg_rtx (wider);
10558 o0h = gen_reg_rtx (wider);
10559 o1l = gen_reg_rtx (wider);
10560 o1h = gen_reg_rtx (wider);
10561 cl = gen_reg_rtx (wider);
10562 ch = gen_reg_rtx (wider);
10563 cmp = gen_reg_rtx (mode);
10564 zero = force_reg (mode, CONST0_RTX (mode));
10566 emit_insn (unpackl (gen_lowpart (mode, o0l), operands[4], zero));
10567 emit_insn (unpackh (gen_lowpart (mode, o0h), operands[4], zero));
10568 emit_insn (unpackl (gen_lowpart (mode, o1l), operands[5], zero));
10569 emit_insn (unpackh (gen_lowpart (mode, o1h), operands[5], zero));
10571 x = gen_rtx_GT (wider, o0l, o1l);
10572 emit_insn (gen_rtx_SET (VOIDmode, cl, x));
10574 x = gen_rtx_GT (wider, o0h, o1h);
10575 emit_insn (gen_rtx_SET (VOIDmode, ch, x));
10577 emit_insn (pack (cmp, cl, ch));
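/* This works because zero-extended elements occupy only the non-negative
   range of the wider signed type, where signed and unsigned ordering
   agree; the signed pack then narrows the all-ones/all-zeros masks back
   to the original element width without changing them.  */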
10580 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
10581 operands[1], operands[2]);
10583 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
10587 /* Expand conditional increment or decrement using adc/sbb instructions.
10588 The default case using setcc followed by the conditional move can be
10589 done by generic code. */
10591 ix86_expand_int_addcc (rtx operands[])
10593 enum rtx_code code = GET_CODE (operands[1]);
10595 rtx val = const0_rtx;
10596 bool fpcmp = false;
10597 enum machine_mode mode = GET_MODE (operands[0]);
10599 if (operands[3] != const1_rtx
10600 && operands[3] != constm1_rtx)
10602 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
10603 ix86_compare_op1, &compare_op))
10605 code = GET_CODE (compare_op);
10607 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10608 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10611 code = ix86_fp_compare_code_to_integer (code);
10618 PUT_CODE (compare_op,
10619 reverse_condition_maybe_unordered
10620 (GET_CODE (compare_op)));
10622 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
10624 PUT_MODE (compare_op, mode);
10626 /* Construct either adc or sbb insn. */
10627 if ((code == LTU) == (operands[3] == constm1_rtx))
10629 switch (GET_MODE (operands[0]))
10632 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
10635 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
10638 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
10641 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10644 gcc_unreachable ();
10649 switch (GET_MODE (operands[0]))
10652 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
10655 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
10658 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
10661 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10664 gcc_unreachable ();
10667 return 1; /* DONE */
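/* E.g. an unsigned "if (a < b) x++;" can come out as (a sketch):

	cmpl	b, a		; CF = (a <u b)
	adcl	$0, x		; x += carry

   and the sbb form handles the decrement case symmetrically.  */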
10671 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
10672 works for floating point parameters and non-offsettable memories.
10673 For pushes, it returns just stack offsets; the values will be saved
10674 in the right order. At most three parts are generated. */
10677 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
10682 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
10684 size = (GET_MODE_SIZE (mode) + 4) / 8;
10686 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
10687 gcc_assert (size >= 2 && size <= 3);
10689 /* Optimize constant pool references to immediates. This is used by fp
10690 moves that force all constants to memory to allow combining. */
10691 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
10693 rtx tmp = maybe_get_pool_constant (operand);
10698 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
10700 /* The only non-offsettable memories we handle are pushes. */
10701 int ok = push_operand (operand, VOIDmode);
10705 operand = copy_rtx (operand);
10706 PUT_MODE (operand, Pmode);
10707 parts[0] = parts[1] = parts[2] = operand;
10711 if (GET_CODE (operand) == CONST_VECTOR)
10713 enum machine_mode imode = int_mode_for_mode (mode);
10714 /* Caution: if we looked through a constant pool memory above,
10715 the operand may actually have a different mode now. That's
10716 ok, since we want to pun this all the way back to an integer. */
10717 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
10718 gcc_assert (operand != NULL);
10724 if (mode == DImode)
10725 split_di (&operand, 1, &parts[0], &parts[1]);
10728 if (REG_P (operand))
10730 gcc_assert (reload_completed);
10731 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
10732 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
10734 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
10736 else if (offsettable_memref_p (operand))
10738 operand = adjust_address (operand, SImode, 0);
10739 parts[0] = operand;
10740 parts[1] = adjust_address (operand, SImode, 4);
10742 parts[2] = adjust_address (operand, SImode, 8);
10744 else if (GET_CODE (operand) == CONST_DOUBLE)
10749 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
10753 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
10754 parts[2] = gen_int_mode (l[2], SImode);
10757 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
10760 gcc_unreachable ();
10762 parts[1] = gen_int_mode (l[1], SImode);
10763 parts[0] = gen_int_mode (l[0], SImode);
10766 gcc_unreachable ();
10771 if (mode == TImode)
10772 split_ti (&operand, 1, &parts[0], &parts[1]);
10773 if (mode == XFmode || mode == TFmode)
10775 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
10776 if (REG_P (operand))
10778 gcc_assert (reload_completed);
10779 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
10780 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
10782 else if (offsettable_memref_p (operand))
10784 operand = adjust_address (operand, DImode, 0);
10785 parts[0] = operand;
10786 parts[1] = adjust_address (operand, upper_mode, 8);
10788 else if (GET_CODE (operand) == CONST_DOUBLE)
10793 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
10794 real_to_target (l, &r, mode);
10796 /* Do not use shift by 32 to avoid warning on 32bit systems. */
10797 if (HOST_BITS_PER_WIDE_INT >= 64)
10800 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
10801 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
10804 parts[0] = immed_double_const (l[0], l[1], DImode);
10806 if (upper_mode == SImode)
10807 parts[1] = gen_int_mode (l[2], SImode);
10808 else if (HOST_BITS_PER_WIDE_INT >= 64)
10811 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
10812 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
10815 parts[1] = immed_double_const (l[2], l[3], DImode);
10818 gcc_unreachable ();
10825 /* Emit insns to perform a move or push of DI, DF, and XF values.
10826 Return false when normal moves are needed; true when all required
10827 insns have been emitted. Operands 2-4 contain the input values
10828 in the correct order; operands 5-7 contain the output values. */
10831 ix86_split_long_move (rtx operands[])
10836 int collisions = 0;
10837 enum machine_mode mode = GET_MODE (operands[0]);
10839 /* The DFmode expanders may ask us to move a double.
10840 For a 64-bit target this is a single move. By hiding the fact
10841 here we simplify the i386.md splitters. */
10842 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
10844 /* Optimize constant pool references to immediates. This is used by
10845 fp moves that force all constants to memory to allow combining. */
10847 if (GET_CODE (operands[1]) == MEM
10848 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10849 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
10850 operands[1] = get_pool_constant (XEXP (operands[1], 0));
10851 if (push_operand (operands[0], VOIDmode))
10853 operands[0] = copy_rtx (operands[0]);
10854 PUT_MODE (operands[0], Pmode);
10857 operands[0] = gen_lowpart (DImode, operands[0]);
10858 operands[1] = gen_lowpart (DImode, operands[1]);
10859 emit_move_insn (operands[0], operands[1]);
10863 /* The only non-offsettable memory we handle is push. */
10864 if (push_operand (operands[0], VOIDmode))
10867 gcc_assert (GET_CODE (operands[0]) != MEM
10868 || offsettable_memref_p (operands[0]));
10870 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
10871 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
10873 /* When emitting a push, take care with source operands on the stack. */
10874 if (push && GET_CODE (operands[1]) == MEM
10875 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
10878 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
10879 XEXP (part[1][2], 0));
10880 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
10881 XEXP (part[1][1], 0));
10884 /* We need to do the copy in the right order in case an address register
10885 of the source overlaps the destination. */
10886 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
10888 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
10890 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
10893 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
10896 /* A collision in the middle part can be handled by reordering. */
10897 if (collisions == 1 && nparts == 3
10898 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
10901 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
10902 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
10905 /* If there are more collisions, we can't handle them by reordering.
10906 Do an lea to the last part and use only one colliding move. */
10907 else if (collisions > 1)
10913 base = part[0][nparts - 1];
10915 /* Handle the case when the last part isn't valid for lea.
10916 This happens in 64-bit mode when storing the 12-byte XFmode. */
10917 if (GET_MODE (base) != Pmode)
10918 base = gen_rtx_REG (Pmode, REGNO (base));
10920 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
10921 part[1][0] = replace_equiv_address (part[1][0], base);
10922 part[1][1] = replace_equiv_address (part[1][1],
10923 plus_constant (base, UNITS_PER_WORD));
10925 part[1][2] = replace_equiv_address (part[1][2],
10926 plus_constant (base, 8));
10936 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
10937 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
10938 emit_move_insn (part[0][2], part[1][2]);
10943 /* In 64-bit mode we don't have a 32-bit push available. In case this is a
10944 register, it is OK - we will just use the larger counterpart. We also
10945 retype memory - this comes from an attempt to avoid the REX prefix on
10946 moving the second half of a TFmode value. */
10947 if (GET_MODE (part[1][1]) == SImode)
10949 switch (GET_CODE (part[1][1]))
10952 part[1][1] = adjust_address (part[1][1], DImode, 0);
10956 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
10960 gcc_unreachable ();
10963 if (GET_MODE (part[1][0]) == SImode)
10964 part[1][0] = part[1][1];
10967 emit_move_insn (part[0][1], part[1][1]);
10968 emit_move_insn (part[0][0], part[1][0]);
10972 /* Choose the correct order so as not to overwrite the source before it is copied. */
10973 if ((REG_P (part[0][0])
10974 && REG_P (part[1][1])
10975 && (REGNO (part[0][0]) == REGNO (part[1][1])
10977 && REGNO (part[0][0]) == REGNO (part[1][2]))))
10979 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
10983 operands[2] = part[0][2];
10984 operands[3] = part[0][1];
10985 operands[4] = part[0][0];
10986 operands[5] = part[1][2];
10987 operands[6] = part[1][1];
10988 operands[7] = part[1][0];
10992 operands[2] = part[0][1];
10993 operands[3] = part[0][0];
10994 operands[5] = part[1][1];
10995 operands[6] = part[1][0];
11002 operands[2] = part[0][0];
11003 operands[3] = part[0][1];
11004 operands[4] = part[0][2];
11005 operands[5] = part[1][0];
11006 operands[6] = part[1][1];
11007 operands[7] = part[1][2];
11011 operands[2] = part[0][0];
11012 operands[3] = part[0][1];
11013 operands[5] = part[1][0];
11014 operands[6] = part[1][1];
11018 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
11021 if (GET_CODE (operands[5]) == CONST_INT
11022 && operands[5] != const0_rtx
11023 && REG_P (operands[2]))
11025 if (GET_CODE (operands[6]) == CONST_INT
11026 && INTVAL (operands[6]) == INTVAL (operands[5]))
11027 operands[6] = operands[2];
11030 && GET_CODE (operands[7]) == CONST_INT
11031 && INTVAL (operands[7]) == INTVAL (operands[5]))
11032 operands[7] = operands[2];
11036 && GET_CODE (operands[6]) == CONST_INT
11037 && operands[6] != const0_rtx
11038 && REG_P (operands[3])
11039 && GET_CODE (operands[7]) == CONST_INT
11040 && INTVAL (operands[7]) == INTVAL (operands[6]))
11041 operands[7] = operands[3];
11044 emit_move_insn (operands[2], operands[5]);
11045 emit_move_insn (operands[3], operands[6]);
11047 emit_move_insn (operands[4], operands[7]);
11052 /* Helper function of ix86_split_ashldi used to generate an SImode
11053 left shift by a constant, either using a single shift or
11054 a sequence of add instructions. */
11057 ix86_expand_ashlsi3_const (rtx operand, int count)
11060 emit_insn (gen_addsi3 (operand, operand, operand));
11061 else if (!optimize_size
11062 && count * ix86_cost->add <= ix86_cost->shift_const)
11065 for (i = 0; i < count; i++)
11066 emit_insn (gen_addsi3 (operand, operand, operand));
11069 emit_insn (gen_ashlsi3 (operand, operand, GEN_INT (count)));
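/* E.g. with count == 2 and a cheap adder this emits (sketch)

	addl	%eax, %eax
	addl	%eax, %eax

   rather than "shll $2, %eax", whenever count * the add cost does not
   exceed the constant-shift cost for the CPU being tuned for.  */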
11073 ix86_split_ashldi (rtx *operands, rtx scratch)
11075 rtx low[2], high[2];
11078 if (GET_CODE (operands[2]) == CONST_INT)
11080 split_di (operands, 2, low, high);
11081 count = INTVAL (operands[2]) & 63;
11085 emit_move_insn (high[0], low[1]);
11086 emit_move_insn (low[0], const0_rtx);
11089 ix86_expand_ashlsi3_const (high[0], count - 32);
11093 if (!rtx_equal_p (operands[0], operands[1]))
11094 emit_move_insn (operands[0], operands[1]);
11095 emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count)));
11096 ix86_expand_ashlsi3_const (low[0], count);
11101 split_di (operands, 1, low, high);
11103 if (operands[1] == const1_rtx)
11105 /* Assuming we've chosen QImode-capable registers, then 1LL << N
11106 can be done with two 32-bit shifts, no branches, no cmoves. */
11107 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
11109 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
11111 ix86_expand_clear (low[0]);
11112 ix86_expand_clear (high[0]);
11113 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32)));
11115 d = gen_lowpart (QImode, low[0]);
11116 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11117 s = gen_rtx_EQ (QImode, flags, const0_rtx);
11118 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11120 d = gen_lowpart (QImode, high[0]);
11121 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11122 s = gen_rtx_NE (QImode, flags, const0_rtx);
11123 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11126 /* Otherwise, we can get the same results by manually performing
11127 a bit extract operation on bit 5, and then performing the two
11128 shifts. The two methods of getting 0/1 into low/high are exactly
11129 the same size. Avoiding the shift in the bit extract case helps
11130 pentium4 a bit; no one else seems to care much either way. */
11135 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
11136 x = gen_rtx_ZERO_EXTEND (SImode, operands[2]);
11138 x = gen_lowpart (SImode, operands[2]);
11139 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
11141 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (5)));
11142 emit_insn (gen_andsi3 (high[0], high[0], GEN_INT (1)));
11143 emit_move_insn (low[0], high[0]);
11144 emit_insn (gen_xorsi3 (low[0], low[0], GEN_INT (1)));
11147 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
11148 emit_insn (gen_ashlsi3 (high[0], high[0], operands[2]));
11152 if (operands[1] == constm1_rtx)
11154 /* For -1LL << N, we can avoid the shld instruction, because we
11155 know that we're shifting 0...31 ones into a -1. */
11156 emit_move_insn (low[0], constm1_rtx);
11158 emit_move_insn (high[0], low[0]);
11160 emit_move_insn (high[0], constm1_rtx);
11164 if (!rtx_equal_p (operands[0], operands[1]))
11165 emit_move_insn (operands[0], operands[1]);
11167 split_di (operands, 1, low, high);
11168 emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2]));
11171 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
11173 if (TARGET_CMOVE && scratch)
11175 ix86_expand_clear (scratch);
11176 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
11179 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
11183 ix86_split_ashrdi (rtx *operands, rtx scratch)
11185 rtx low[2], high[2];
11188 if (GET_CODE (operands[2]) == CONST_INT)
11190 split_di (operands, 2, low, high);
11191 count = INTVAL (operands[2]) & 63;
11195 emit_move_insn (high[0], high[1]);
11196 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
11197 emit_move_insn (low[0], high[0]);
11200 else if (count >= 32)
11202 emit_move_insn (low[0], high[1]);
11203 emit_move_insn (high[0], low[0]);
11204 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
11206 emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32)));
11210 if (!rtx_equal_p (operands[0], operands[1]))
11211 emit_move_insn (operands[0], operands[1]);
11212 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
11213 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count)));
11218 if (!rtx_equal_p (operands[0], operands[1]))
11219 emit_move_insn (operands[0], operands[1]);
11221 split_di (operands, 1, low, high);
11223 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
11224 emit_insn (gen_ashrsi3 (high[0], high[0], operands[2]));
11226 if (TARGET_CMOVE && scratch)
11228 emit_move_insn (scratch, high[0]);
11229 emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31)));
11230 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
11234 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
11239 ix86_split_lshrdi (rtx *operands, rtx scratch)
11241 rtx low[2], high[2];
11244 if (GET_CODE (operands[2]) == CONST_INT)
11246 split_di (operands, 2, low, high);
11247 count = INTVAL (operands[2]) & 63;
11251 emit_move_insn (low[0], high[1]);
11252 ix86_expand_clear (high[0]);
11255 emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32)));
11259 if (!rtx_equal_p (operands[0], operands[1]))
11260 emit_move_insn (operands[0], operands[1]);
11261 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
11262 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count)));
11267 if (!rtx_equal_p (operands[0], operands[1]))
11268 emit_move_insn (operands[0], operands[1]);
11270 split_di (operands, 1, low, high);
11272 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
11273 emit_insn (gen_lshrsi3 (high[0], high[0], operands[2]));
11275 /* Heh. By reversing the arguments, we can reuse this pattern. */
11276 if (TARGET_CMOVE && scratch)
11278 ix86_expand_clear (scratch);
11279 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
11283 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
11287 /* Helper function for the string operations below. Test whether VARIABLE
11288 is aligned to VALUE bytes; if it is, jump to the returned label. */
11290 ix86_expand_aligntest (rtx variable, int value)
11292 rtx label = gen_label_rtx ();
11293 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
11294 if (GET_MODE (variable) == DImode)
11295 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
11297 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
11298 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
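/* Typical use, mirroring the callers further down (a sketch):

	rtx label = ix86_expand_aligntest (destreg, 1);
	... copy a single byte and adjust the counter ...
	emit_label (label);
	LABEL_NUSES (label) = 1;

   so the fixup runs only when the pointer is actually misaligned.  */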
11303 /* Decrease COUNTREG by VALUE. */
11305 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
11307 if (GET_MODE (countreg) == DImode)
11308 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
11310 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
11313 /* Zero extend the possibly-SImode EXP to a Pmode register. */
11315 ix86_zero_extend_to_Pmode (rtx exp)
11318 if (GET_MODE (exp) == VOIDmode)
11319 return force_reg (Pmode, exp);
11320 if (GET_MODE (exp) == Pmode)
11321 return copy_to_mode_reg (Pmode, exp);
11322 r = gen_reg_rtx (Pmode);
11323 emit_insn (gen_zero_extendsidi2 (r, exp));
11327 /* Expand string move (memcpy) operation. Use i386 string operations when
11328 profitable. expand_clrmem contains similar code. */
11330 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
11332 rtx srcreg, destreg, countreg, srcexp, destexp;
11333 enum machine_mode counter_mode;
11334 HOST_WIDE_INT align = 0;
11335 unsigned HOST_WIDE_INT count = 0;
11337 if (GET_CODE (align_exp) == CONST_INT)
11338 align = INTVAL (align_exp);
11340 /* Can't use any of this if the user has appropriated esi or edi. */
11341 if (global_regs[4] || global_regs[5])
11344 /* This simple hack avoids all of the inlining code and simplifies the code below. */
11345 if (!TARGET_ALIGN_STRINGOPS)
11348 if (GET_CODE (count_exp) == CONST_INT)
11350 count = INTVAL (count_exp);
11351 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
11355 /* Figure out the proper mode for the counter. For 32 bits it is always SImode;
11356 for 64 bits use SImode when possible, otherwise DImode.
11357 Set count to the number of bytes copied when known at compile time. */
11359 || GET_MODE (count_exp) == SImode
11360 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
11361 counter_mode = SImode;
11363 counter_mode = DImode;
11365 gcc_assert (counter_mode == SImode || counter_mode == DImode);
11367 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
11368 if (destreg != XEXP (dst, 0))
11369 dst = replace_equiv_address_nv (dst, destreg);
11370 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
11371 if (srcreg != XEXP (src, 0))
11372 src = replace_equiv_address_nv (src, srcreg);
11374 /* When optimizing for size, emit the simple rep ; movsb instruction for
11375 counts not divisible by 4, except when the (movsl;)*(movsw;)?(movsb;)?
11376 sequence is shorter than mov{b,l} $count, %{ecx,cl}; rep; movsb.
11377 The size of the (movsl;)*(movsw;)?(movsb;)? sequence is
11378 count / 4 + (count & 3) bytes; the other sequence is either 4 or 7 bytes,
11379 but we don't know whether the upper 24 (resp. 56) bits of %ecx will be
11380 known to be zero or not. The rep; movsb sequence causes higher
11381 register pressure though, so take that into account. */
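/* Worked example (illustrative): count == 11 expands to
   movsl; movsl; movsw; movsb = 1 + 1 + 2 + 1 = 5 bytes
   (movsw carries the operand-size prefix), matching
   count / 4 + (count & 3) = 2 + 3 = 5.  */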
11383 if ((!optimize || optimize_size)
11388 || (count & 3) + count / 4 > 6))))
11390 emit_insn (gen_cld ());
11391 countreg = ix86_zero_extend_to_Pmode (count_exp);
11392 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
11393 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
11394 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
11398 /* For constant aligned (or small unaligned) copies use rep movsl
11399 followed by code copying the rest. For PentiumPro ensure 8 byte
11400 alignment to allow rep movsl acceleration. */
11402 else if (count != 0
11404 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
11405 || optimize_size || count < (unsigned int) 64))
11407 unsigned HOST_WIDE_INT offset = 0;
11408 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11409 rtx srcmem, dstmem;
11411 emit_insn (gen_cld ());
11412 if (count & ~(size - 1))
11414 if ((TARGET_SINGLE_STRINGOP || optimize_size) && count < 5 * 4)
11416 enum machine_mode movs_mode = size == 4 ? SImode : DImode;
11418 while (offset < (count & ~(size - 1)))
11420 srcmem = adjust_automodify_address_nv (src, movs_mode,
11422 dstmem = adjust_automodify_address_nv (dst, movs_mode,
11424 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11430 countreg = GEN_INT ((count >> (size == 4 ? 2 : 3))
11431 & (TARGET_64BIT ? -1 : 0x3fffffff));
11432 countreg = copy_to_mode_reg (counter_mode, countreg);
11433 countreg = ix86_zero_extend_to_Pmode (countreg);
11435 destexp = gen_rtx_ASHIFT (Pmode, countreg,
11436 GEN_INT (size == 4 ? 2 : 3));
11437 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
11438 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11440 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
11441 countreg, destexp, srcexp));
11442 offset = count & ~(size - 1);
11445 if (size == 8 && (count & 0x04))
11447 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
11449 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
11451 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11456 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
11458 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
11460 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11465 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
11467 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
11469 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11472 /* The generic code based on the glibc implementation:
11473 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
11474 allowing accelerated copying there)
11475 - copy the data using rep movsl
11476 - copy the rest. */
11481 rtx srcmem, dstmem;
11482 int desired_alignment = (TARGET_PENTIUMPRO
11483 && (count == 0 || count >= (unsigned int) 260)
11484 ? 8 : UNITS_PER_WORD);
11485 /* Get rid of MEM_OFFSETs, they won't be accurate. */
11486 dst = change_address (dst, BLKmode, destreg);
11487 src = change_address (src, BLKmode, srcreg);
11489 /* In case we don't know anything about the alignment, default to the
11490 library version, since it is usually equally fast and results in shorter code.
11493 Also emit a call when we know that the count is large and call overhead
11494 will not be important.  */
11495 if (!TARGET_INLINE_ALL_STRINGOPS
11496 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
11499 if (TARGET_SINGLE_STRINGOP)
11500 emit_insn (gen_cld ());
11502 countreg2 = gen_reg_rtx (Pmode);
11503 countreg = copy_to_mode_reg (counter_mode, count_exp);
11505 /* We don't use loops to align the destination or to copy parts smaller
11506 than 4 bytes, because gcc is able to optimize such code better (when
11507 the destination or the count really is aligned, gcc is often
11508 able to predict the branches) and also it is friendlier to the
11509 hardware branch prediction.
11511 Using loops is beneficial for the generic case, because we can
11512 handle small counts using the loops.  Many CPUs (such as Athlon)
11513 have large REP prefix setup costs.
11515 This is quite costly.  Maybe we can revisit this decision later or
11516 add some customizability to this code.  */
11518 if (count == 0 && align < desired_alignment)
11520 label = gen_label_rtx ();
11521 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
11522 LEU, 0, counter_mode, 1, label);
11526 rtx label = ix86_expand_aligntest (destreg, 1);
11527 srcmem = change_address (src, QImode, srcreg);
11528 dstmem = change_address (dst, QImode, destreg);
11529 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11530 ix86_adjust_counter (countreg, 1);
11531 emit_label (label);
11532 LABEL_NUSES (label) = 1;
11536 rtx label = ix86_expand_aligntest (destreg, 2);
11537 srcmem = change_address (src, HImode, srcreg);
11538 dstmem = change_address (dst, HImode, destreg);
11539 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11540 ix86_adjust_counter (countreg, 2);
11541 emit_label (label);
11542 LABEL_NUSES (label) = 1;
11544 if (align <= 4 && desired_alignment > 4)
11546 rtx label = ix86_expand_aligntest (destreg, 4);
11547 srcmem = change_address (src, SImode, srcreg);
11548 dstmem = change_address (dst, SImode, destreg);
11549 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11550 ix86_adjust_counter (countreg, 4);
11551 emit_label (label);
11552 LABEL_NUSES (label) = 1;
11555 if (label && desired_alignment > 4 && !TARGET_64BIT)
11557 emit_label (label);
11558 LABEL_NUSES (label) = 1;
11561 if (!TARGET_SINGLE_STRINGOP)
11562 emit_insn (gen_cld ());
11565 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
11567 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
11571 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
11572 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
11574 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
11575 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11576 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
11577 countreg2, destexp, srcexp));
11581 emit_label (label);
11582 LABEL_NUSES (label) = 1;
11584 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
11586 srcmem = change_address (src, SImode, srcreg);
11587 dstmem = change_address (dst, SImode, destreg);
11588 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11590 if ((align <= 4 || count == 0) && TARGET_64BIT)
11592 rtx label = ix86_expand_aligntest (countreg, 4);
11593 srcmem = change_address (src, SImode, srcreg);
11594 dstmem = change_address (dst, SImode, destreg);
11595 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11596 emit_label (label);
11597 LABEL_NUSES (label) = 1;
11599 if (align > 2 && count != 0 && (count & 2))
11601 srcmem = change_address (src, HImode, srcreg);
11602 dstmem = change_address (dst, HImode, destreg);
11603 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11605 if (align <= 2 || count == 0)
11607 rtx label = ix86_expand_aligntest (countreg, 2);
11608 srcmem = change_address (src, HImode, srcreg);
11609 dstmem = change_address (dst, HImode, destreg);
11610 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11611 emit_label (label);
11612 LABEL_NUSES (label) = 1;
11614 if (align > 1 && count != 0 && (count & 1))
11616 srcmem = change_address (src, QImode, srcreg);
11617 dstmem = change_address (dst, QImode, destreg);
11618 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11620 if (align <= 1 || count == 0)
11622 rtx label = ix86_expand_aligntest (countreg, 1);
11623 srcmem = change_address (src, QImode, srcreg);
11624 dstmem = change_address (dst, QImode, destreg);
11625 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11626 emit_label (label);
11627 LABEL_NUSES (label) = 1;
11634 /* Expand string clear operation (bzero). Use i386 string operations when
11635 profitable.  ix86_expand_movmem contains similar code.  */
11637 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
11639 rtx destreg, zeroreg, countreg, destexp;
11640 enum machine_mode counter_mode;
11641 HOST_WIDE_INT align = 0;
11642 unsigned HOST_WIDE_INT count = 0;
11644 if (GET_CODE (align_exp) == CONST_INT)
11645 align = INTVAL (align_exp);
11647 /* Can't use any of this if the user has appropriated edi, which the
11648 stos instructions write through.  */
11648 if (global_regs[5])
11651 /* This simple hack avoids all inlining code and simplifies code below. */
11652 if (!TARGET_ALIGN_STRINGOPS)
11655 if (GET_CODE (count_exp) == CONST_INT)
11657 count = INTVAL (count_exp);
11658 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
11661 /* Figure out the proper mode for the counter.  For 32 bits it is always
11662 SImode; for 64 bits use SImode when possible, otherwise DImode.
11663 Set count to the number of bytes copied when known at compile time.  */
11665 || GET_MODE (count_exp) == SImode
11666 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
11667 counter_mode = SImode;
11669 counter_mode = DImode;
11671 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
11672 if (destreg != XEXP (dst, 0))
11673 dst = replace_equiv_address_nv (dst, destreg);
11676 /* When optimizing for size emit a simple rep ; stosb instruction for
11677 counts not divisible by 4.  The movl $N, %ecx; rep; stosb
11678 sequence is 7 bytes long, so if optimizing for size and the count is
11679 small enough that some stosl, stosw and stosb instructions without
11680 rep are shorter, fall through to the next branch.  */
11682 if ((!optimize || optimize_size)
11685 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
11687 emit_insn (gen_cld ());
11689 countreg = ix86_zero_extend_to_Pmode (count_exp);
11690 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
11691 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
11692 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
11694 else if (count != 0
11696 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
11697 || optimize_size || count < (unsigned int) 64))
11699 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11700 unsigned HOST_WIDE_INT offset = 0;
11702 emit_insn (gen_cld ());
11704 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
11705 if (count & ~(size - 1))
11707 unsigned HOST_WIDE_INT repcount;
11708 unsigned int max_nonrep;
11710 repcount = count >> (size == 4 ? 2 : 3);
11712 repcount &= 0x3fffffff;
11714 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
11715 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
11716 bytes.  In both cases the latter seems to be faster for small counts.  */
11718 max_nonrep = size == 4 ? 7 : 4;
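/* Illustrative arithmetic (assumption: size == 4, count == 23): repcount
   is 23 >> 2 == 5, which is <= max_nonrep, so five individual stosl
   stores are emitted below instead of a rep prefix, and the remaining
   3 bytes are cleared by the stosw/stosb tail further down.  */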
11719 if (!optimize_size)
11722 case PROCESSOR_PENTIUM4:
11723 case PROCESSOR_NOCONA:
11730 if (repcount <= max_nonrep)
11731 while (repcount-- > 0)
11733 rtx mem = adjust_automodify_address_nv (dst,
11734 GET_MODE (zeroreg),
11736 emit_insn (gen_strset (destreg, mem, zeroreg));
11741 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
11742 countreg = ix86_zero_extend_to_Pmode (countreg);
11743 destexp = gen_rtx_ASHIFT (Pmode, countreg,
11744 GEN_INT (size == 4 ? 2 : 3));
11745 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11746 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
11748 offset = count & ~(size - 1);
11751 if (size == 8 && (count & 0x04))
11753 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
11755 emit_insn (gen_strset (destreg, mem,
11756 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11761 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
11763 emit_insn (gen_strset (destreg, mem,
11764 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11769 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
11771 emit_insn (gen_strset (destreg, mem,
11772 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11779 /* Compute desired alignment of the string operation. */
11780 int desired_alignment = (TARGET_PENTIUMPRO
11781 && (count == 0 || count >= (unsigned int) 260)
11782 ? 8 : UNITS_PER_WORD);
11784 /* In case we don't know anything about the alignment, default to the
11785 library version, since it is usually equally fast and results in shorter code.
11788 Also emit a call when we know that the count is large and call overhead
11789 will not be important.  */
11790 if (!TARGET_INLINE_ALL_STRINGOPS
11791 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
11794 if (TARGET_SINGLE_STRINGOP)
11795 emit_insn (gen_cld ());
11797 countreg2 = gen_reg_rtx (Pmode);
11798 countreg = copy_to_mode_reg (counter_mode, count_exp);
11799 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
11800 /* Get rid of MEM_OFFSET, it won't be accurate. */
11801 dst = change_address (dst, BLKmode, destreg);
11803 if (count == 0 && align < desired_alignment)
11805 label = gen_label_rtx ();
11806 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
11807 LEU, 0, counter_mode, 1, label);
11811 rtx label = ix86_expand_aligntest (destreg, 1);
11812 emit_insn (gen_strset (destreg, dst,
11813 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11814 ix86_adjust_counter (countreg, 1);
11815 emit_label (label);
11816 LABEL_NUSES (label) = 1;
11820 rtx label = ix86_expand_aligntest (destreg, 2);
11821 emit_insn (gen_strset (destreg, dst,
11822 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11823 ix86_adjust_counter (countreg, 2);
11824 emit_label (label);
11825 LABEL_NUSES (label) = 1;
11827 if (align <= 4 && desired_alignment > 4)
11829 rtx label = ix86_expand_aligntest (destreg, 4);
11830 emit_insn (gen_strset (destreg, dst,
11832 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
11834 ix86_adjust_counter (countreg, 4);
11835 emit_label (label);
11836 LABEL_NUSES (label) = 1;
11839 if (label && desired_alignment > 4 && !TARGET_64BIT)
11841 emit_label (label);
11842 LABEL_NUSES (label) = 1;
11846 if (!TARGET_SINGLE_STRINGOP)
11847 emit_insn (gen_cld ());
11850 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
11852 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
11856 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
11857 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
11859 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11860 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
11864 emit_label (label);
11865 LABEL_NUSES (label) = 1;
11868 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
11869 emit_insn (gen_strset (destreg, dst,
11870 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11871 if (TARGET_64BIT && (align <= 4 || count == 0))
11873 rtx label = ix86_expand_aligntest (countreg, 4);
11874 emit_insn (gen_strset (destreg, dst,
11875 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11876 emit_label (label);
11877 LABEL_NUSES (label) = 1;
11879 if (align > 2 && count != 0 && (count & 2))
11880 emit_insn (gen_strset (destreg, dst,
11881 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11882 if (align <= 2 || count == 0)
11884 rtx label = ix86_expand_aligntest (countreg, 2);
11885 emit_insn (gen_strset (destreg, dst,
11886 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11887 emit_label (label);
11888 LABEL_NUSES (label) = 1;
11890 if (align > 1 && count != 0 && (count & 1))
11891 emit_insn (gen_strset (destreg, dst,
11892 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11893 if (align <= 1 || count == 0)
11895 rtx label = ix86_expand_aligntest (countreg, 1);
11896 emit_insn (gen_strset (destreg, dst,
11897 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11898 emit_label (label);
11899 LABEL_NUSES (label) = 1;
11905 /* Expand strlen. */
11907 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
11909 rtx addr, scratch1, scratch2, scratch3, scratch4;
11911 /* The generic case of the strlen expander is long.  Avoid expanding it
11912 unless TARGET_INLINE_ALL_STRINGOPS.  */
11914 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
11915 && !TARGET_INLINE_ALL_STRINGOPS
11917 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
11920 addr = force_reg (Pmode, XEXP (src, 0));
11921 scratch1 = gen_reg_rtx (Pmode);
11923 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
11926 /* Well, it seems that some optimizers do not combine a call like
11927 foo(strlen(bar), strlen(bar));
11928 when the move and the subtraction are done here.  They do calculate
11929 the length just once when these instructions are done inside of
11930 output_strlen_unroll().  But since &bar[strlen(bar)] is
11931 often used, and this uses one fewer register for the lifetime of
11932 output_strlen_unroll(), this seems better.  */
11934 emit_move_insn (out, addr);
11936 ix86_expand_strlensi_unroll_1 (out, src, align);
11938 /* strlensi_unroll_1 returns the address of the zero at the end of
11939 the string, like memchr(), so compute the length by subtracting
11940 the start address. */
11942 emit_insn (gen_subdi3 (out, out, addr));
11944 emit_insn (gen_subsi3 (out, out, addr));
11949 scratch2 = gen_reg_rtx (Pmode);
11950 scratch3 = gen_reg_rtx (Pmode);
11951 scratch4 = force_reg (Pmode, constm1_rtx);
11953 emit_move_insn (scratch3, addr);
11954 eoschar = force_reg (QImode, eoschar);
11956 emit_insn (gen_cld ());
11957 src = replace_equiv_address_nv (src, scratch3);
11959 /* If .md starts supporting :P, this can be done in .md. */
11960 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
11961 scratch4), UNSPEC_SCAS);
11962 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
11965 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
11966 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
11970 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
11971 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
11977 /* Expand the appropriate insns for doing strlen if not just doing repnz; scasb.
11980 out = result, initialized with the start address
11981 align_rtx = alignment of the address.
11982 scratch = scratch register, initialized with the start address when
11983 not aligned, otherwise undefined
11985 This is just the body. It needs the initializations mentioned above and
11986 some address computing at the end. These things are done in i386.md. */
11989 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
11993 rtx align_2_label = NULL_RTX;
11994 rtx align_3_label = NULL_RTX;
11995 rtx align_4_label = gen_label_rtx ();
11996 rtx end_0_label = gen_label_rtx ();
11998 rtx tmpreg = gen_reg_rtx (SImode);
11999 rtx scratch = gen_reg_rtx (SImode);
12003 if (GET_CODE (align_rtx) == CONST_INT)
12004 align = INTVAL (align_rtx);
12006 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
12008 /* Is there a known alignment and is it less than 4? */
12011 rtx scratch1 = gen_reg_rtx (Pmode);
12012 emit_move_insn (scratch1, out);
12013 /* Is there a known alignment and is it not 2? */
12016 align_3_label = gen_label_rtx (); /* Label when addr & 3 == 3.  */
12017 align_2_label = gen_label_rtx (); /* Label when addr & 3 == 2.  */
12019 /* Leave just the 3 lower bits. */
12020 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
12021 NULL_RTX, 0, OPTAB_WIDEN);
12023 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12024 Pmode, 1, align_4_label);
12025 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
12026 Pmode, 1, align_2_label);
12027 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
12028 Pmode, 1, align_3_label);
12032 /* Since the alignment is 2, we have to check 2 or 0 bytes;
12033 check whether the address is aligned to a 4-byte boundary.  */
12035 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
12036 NULL_RTX, 0, OPTAB_WIDEN);
12038 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12039 Pmode, 1, align_4_label);
12042 mem = change_address (src, QImode, out);
12044 /* Now compare the bytes. */
12046 /* Compare the first n unaligned bytes on a byte-by-byte basis.  */
12047 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
12048 QImode, 1, end_0_label);
12050 /* Increment the address. */
12052 emit_insn (gen_adddi3 (out, out, const1_rtx));
12054 emit_insn (gen_addsi3 (out, out, const1_rtx));
12056 /* Not needed with an alignment of 2 */
12059 emit_label (align_2_label);
12061 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12065 emit_insn (gen_adddi3 (out, out, const1_rtx));
12067 emit_insn (gen_addsi3 (out, out, const1_rtx));
12069 emit_label (align_3_label);
12072 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12076 emit_insn (gen_adddi3 (out, out, const1_rtx));
12078 emit_insn (gen_addsi3 (out, out, const1_rtx));
12081 /* Generate a loop to check 4 bytes at a time.  It is not a good idea to
12082 align this loop; it only enlarges the code and does not help performance.  */
12084 emit_label (align_4_label);
12086 mem = change_address (src, SImode, out);
12087 emit_move_insn (scratch, mem);
12089 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
12091 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
12093 /* This formula yields a nonzero result iff one of the bytes is zero.
12094 This saves three branches inside the loop and many cycles.  */
12096 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
12097 emit_insn (gen_one_cmplsi2 (scratch, scratch));
12098 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
12099 emit_insn (gen_andsi3 (tmpreg, tmpreg,
12100 gen_int_mode (0x80808080, SImode)));
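#if 0
/* Illustrative sketch, not part of the build: the zero-byte test
   constructed above, evaluated on a host 32-bit value.  HAS_ZERO (x)
   is nonzero iff some byte of X is zero.  */
#include <assert.h>
#define HAS_ZERO(x) ((((x) - 0x01010101U) & ~(x)) & 0x80808080U)
int
main (void)
{
  assert (HAS_ZERO (0x41004243U));   /* second-highest byte is zero.  */
  assert (!HAS_ZERO (0x41424344U));  /* no zero byte.  */
  return 0;
}
#endif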
12101 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
12106 rtx reg = gen_reg_rtx (SImode);
12107 rtx reg2 = gen_reg_rtx (Pmode);
12108 emit_move_insn (reg, tmpreg);
12109 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
12111 /* If zero is not in the first two bytes, move two bytes forward. */
12112 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12113 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12114 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12115 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
12116 gen_rtx_IF_THEN_ELSE (SImode, tmp,
12119 /* Emit the lea manually to avoid clobbering the flags.  */
12120 emit_insn (gen_rtx_SET (SImode, reg2,
12121 gen_rtx_PLUS (Pmode, out, const2_rtx)));
12123 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12124 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12125 emit_insn (gen_rtx_SET (VOIDmode, out,
12126 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
12133 rtx end_2_label = gen_label_rtx ();
12134 /* Is zero in the first two bytes? */
12136 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12137 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12138 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
12139 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12140 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
12142 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
12143 JUMP_LABEL (tmp) = end_2_label;
12145 /* Not in the first two. Move two bytes forward. */
12146 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
12148 emit_insn (gen_adddi3 (out, out, const2_rtx));
12150 emit_insn (gen_addsi3 (out, out, const2_rtx));
12152 emit_label (end_2_label);
12156 /* Avoid a branch in fixing the byte.  */
12157 tmpreg = gen_lowpart (QImode, tmpreg);
12158 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
12159 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
12161 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
12163 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
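/* Illustrative note: at this point OUT is four bytes past the first
   byte of the two-byte group known to contain the zero.  Doubling the
   low byte of TMPREG moves its 0x80 marker into the carry flag, so
   subtracting 3 plus the carry lands OUT exactly on the zero byte:
   -4 when the zero is the first byte of the pair, -3 when it is the
   second.  */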
12165 emit_label (end_0_label);
12169 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
12170 rtx callarg2 ATTRIBUTE_UNUSED,
12171 rtx pop, int sibcall)
12173 rtx use = NULL, call;
12175 if (pop == const0_rtx)
12177 gcc_assert (!TARGET_64BIT || !pop);
12180 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
12181 fnaddr = machopic_indirect_call_target (fnaddr);
12183 /* Static functions and indirect calls don't need the pic register. */
12184 if (! TARGET_64BIT && flag_pic
12185 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
12186 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
12187 use_reg (&use, pic_offset_table_rtx);
12189 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
12191 rtx al = gen_rtx_REG (QImode, 0);
12192 emit_move_insn (al, callarg2);
12193 use_reg (&use, al);
12195 #endif /* TARGET_MACHO */
12197 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
12199 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12200 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12202 if (sibcall && TARGET_64BIT
12203 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
12206 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12207 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
12208 emit_move_insn (fnaddr, addr);
12209 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12212 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
12214 call = gen_rtx_SET (VOIDmode, retval, call);
12217 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
12218 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
12219 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
12222 call = emit_call_insn (call);
12224 CALL_INSN_FUNCTION_USAGE (call) = use;
12228 /* Clear stack slot assignments remembered from previous functions.
12229 This is called from INIT_EXPANDERS once before RTL is emitted for each function.  */
12232 static struct machine_function *
12233 ix86_init_machine_status (void)
12235 struct machine_function *f;
12237 f = ggc_alloc_cleared (sizeof (struct machine_function));
12238 f->use_fast_prologue_epilogue_nregs = -1;
12243 /* Return a MEM corresponding to a stack slot with mode MODE.
12244 Allocate a new slot if necessary.
12246 The RTL for a function can have several slots available: N is
12247 which slot to use. */
12250 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
12252 struct stack_local_entry *s;
12254 gcc_assert (n < MAX_386_STACK_LOCALS);
12256 for (s = ix86_stack_locals; s; s = s->next)
12257 if (s->mode == mode && s->n == n)
12260 s = (struct stack_local_entry *)
12261 ggc_alloc (sizeof (struct stack_local_entry));
12264 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
12266 s->next = ix86_stack_locals;
12267 ix86_stack_locals = s;
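#if 0
/* Illustrative usage sketch, not part of the build; SLOT_TEMP stands
   for whichever enum ix86_stack_slot value the caller owns.  Repeated
   requests for the same (mode, slot) pair return the cached MEM.  */
rtx a = assign_386_stack_local (SImode, SLOT_TEMP);
rtx b = assign_386_stack_local (SImode, SLOT_TEMP);  /* a == b here.  */
#endif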
12271 /* Construct the SYMBOL_REF for the tls_get_addr function. */
12273 static GTY(()) rtx ix86_tls_symbol;
12275 ix86_tls_get_addr (void)
12278 if (!ix86_tls_symbol)
12280 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
12281 (TARGET_GNU_TLS && !TARGET_64BIT)
12282 ? "___tls_get_addr"
12283 : "__tls_get_addr");
12286 return ix86_tls_symbol;
12289 /* Calculate the length of the memory address in the instruction
12290 encoding. Does not include the one-byte modrm, opcode, or prefix. */
12293 memory_address_length (rtx addr)
12295 struct ix86_address parts;
12296 rtx base, index, disp;
12300 if (GET_CODE (addr) == PRE_DEC
12301 || GET_CODE (addr) == POST_INC
12302 || GET_CODE (addr) == PRE_MODIFY
12303 || GET_CODE (addr) == POST_MODIFY)
12306 ok = ix86_decompose_address (addr, &parts);
12309 if (parts.base && GET_CODE (parts.base) == SUBREG)
12310 parts.base = SUBREG_REG (parts.base);
12311 if (parts.index && GET_CODE (parts.index) == SUBREG)
12312 parts.index = SUBREG_REG (parts.index);
12315 index = parts.index;
12320 /* Rule of thumb: esp as the base always wants an index,
12321 and ebp as the base always wants a displacement.  */
12323 /* Register Indirect. */
12324 if (base && !index && !disp)
12326 /* esp (for its index) and ebp (for its displacement) need
12327 the two-byte modrm form. */
12328 if (addr == stack_pointer_rtx
12329 || addr == arg_pointer_rtx
12330 || addr == frame_pointer_rtx
12331 || addr == hard_frame_pointer_rtx)
12335 /* Direct Addressing. */
12336 else if (disp && !base && !index)
12341 /* Find the length of the displacement constant. */
12344 if (GET_CODE (disp) == CONST_INT
12345 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
12351 /* ebp always wants a displacement. */
12352 else if (base == hard_frame_pointer_rtx)
12355 /* An index requires the two-byte modrm form.... */
12357 /* ...like esp, which always wants an index. */
12358 || base == stack_pointer_rtx
12359 || base == arg_pointer_rtx
12360 || base == frame_pointer_rtx)
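/* Worked examples (illustrative; lengths exclude the opcode and the
   one-byte modrm, as stated above):
     (%eax)         -> 0  plain register indirect
     (%esp)         -> 1  esp needs a SIB byte
     (%ebp)         -> 1  ebp needs an explicit disp8 of zero
     8(%eax)        -> 1  disp8, satisfies the 'K' constraint
     0x1234(%eax)   -> 4  disp32
     (%eax,%ebx,2)  -> 1  the index forces a SIB byte  */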
12367 /* Compute default value for "length_immediate" attribute.  When SHORTFORM
12368 is set, expect that the insn has an 8-bit immediate alternative.  */
12370 ix86_attr_length_immediate_default (rtx insn, int shortform)
12374 extract_insn_cached (insn);
12375 for (i = recog_data.n_operands - 1; i >= 0; --i)
12376 if (CONSTANT_P (recog_data.operand[i]))
12380 && GET_CODE (recog_data.operand[i]) == CONST_INT
12381 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
12385 switch (get_attr_mode (insn))
12396 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values.  */
12401 fatal_insn ("unknown insn mode", insn);
12407 /* Compute default value for "length_address" attribute. */
12409 ix86_attr_length_address_default (rtx insn)
12413 if (get_attr_type (insn) == TYPE_LEA)
12415 rtx set = PATTERN (insn);
12417 if (GET_CODE (set) == PARALLEL)
12418 set = XVECEXP (set, 0, 0);
12420 gcc_assert (GET_CODE (set) == SET);
12422 return memory_address_length (SET_SRC (set));
12425 extract_insn_cached (insn);
12426 for (i = recog_data.n_operands - 1; i >= 0; --i)
12427 if (GET_CODE (recog_data.operand[i]) == MEM)
12429 return memory_address_length (XEXP (recog_data.operand[i], 0));
12435 /* Return the maximum number of instructions a cpu can issue. */
12438 ix86_issue_rate (void)
12442 case PROCESSOR_PENTIUM:
12446 case PROCESSOR_PENTIUMPRO:
12447 case PROCESSOR_PENTIUM4:
12448 case PROCESSOR_ATHLON:
12450 case PROCESSOR_NOCONA:
12458 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
12459 set by DEP_INSN, and nothing else that DEP_INSN sets.  */
12462 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
12466 /* Simplify the test for uninteresting insns. */
12467 if (insn_type != TYPE_SETCC
12468 && insn_type != TYPE_ICMOV
12469 && insn_type != TYPE_FCMOV
12470 && insn_type != TYPE_IBR)
12473 if ((set = single_set (dep_insn)) != 0)
12475 set = SET_DEST (set);
12478 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
12479 && XVECLEN (PATTERN (dep_insn), 0) == 2
12480 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
12481 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
12483 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
12484 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
12489 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
12492 /* This test is true if the dependent insn reads the flags but
12493 not any other potentially set register. */
12494 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
12497 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
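/* Illustrative RTL (an assumed shape matched by the PARALLEL case
   above): an arithmetic insn that sets both the flags and a result,
     (parallel [(set (reg:CCNO 17) (compare:CCNO ...))
                (set (reg:SI 0) (plus:SI ...))])
   INSN is flags-dependent only if it mentions the flags register but
   not the result register.  */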
12503 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
12504 address with operands set by DEP_INSN. */
12507 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
12511 if (insn_type == TYPE_LEA
12514 addr = PATTERN (insn);
12516 if (GET_CODE (addr) == PARALLEL)
12517 addr = XVECEXP (addr, 0, 0);
12519 gcc_assert (GET_CODE (addr) == SET);
12521 addr = SET_SRC (addr);
12526 extract_insn_cached (insn);
12527 for (i = recog_data.n_operands - 1; i >= 0; --i)
12528 if (GET_CODE (recog_data.operand[i]) == MEM)
12530 addr = XEXP (recog_data.operand[i], 0);
12537 return modified_in_p (addr, dep_insn);
12541 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
12543 enum attr_type insn_type, dep_insn_type;
12544 enum attr_memory memory;
12546 int dep_insn_code_number;
12548 /* Anti and output dependencies have zero cost on all CPUs. */
12549 if (REG_NOTE_KIND (link) != 0)
12552 dep_insn_code_number = recog_memoized (dep_insn);
12554 /* If we can't recognize the insns, we can't really do anything. */
12555 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
12558 insn_type = get_attr_type (insn);
12559 dep_insn_type = get_attr_type (dep_insn);
12563 case PROCESSOR_PENTIUM:
12564 /* Address Generation Interlock adds a cycle of latency. */
12565 if (ix86_agi_dependant (insn, dep_insn, insn_type))
12568 /* ??? Compares pair with jump/setcc. */
12569 if (ix86_flags_dependant (insn, dep_insn, insn_type))
12572 /* Floating point stores require the value to be ready one cycle earlier.  */
12573 if (insn_type == TYPE_FMOV
12574 && get_attr_memory (insn) == MEMORY_STORE
12575 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12579 case PROCESSOR_PENTIUMPRO:
12580 memory = get_attr_memory (insn);
12582 /* INT->FP conversion is expensive. */
12583 if (get_attr_fp_int_src (dep_insn))
12586 /* There is one cycle extra latency between an FP op and a store. */
12587 if (insn_type == TYPE_FMOV
12588 && (set = single_set (dep_insn)) != NULL_RTX
12589 && (set2 = single_set (insn)) != NULL_RTX
12590 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
12591 && GET_CODE (SET_DEST (set2)) == MEM)
12594 /* Show the ability of the reorder buffer to hide the latency of a load by
12595 executing it in parallel with the previous instruction when the
12596 previous instruction is not needed to compute the address.  */
12597 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12598 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12600 /* Claim moves to take one cycle, as the core can issue one load
12601 at a time and the next load can start a cycle later.  */
12602 if (dep_insn_type == TYPE_IMOV
12603 || dep_insn_type == TYPE_FMOV)
12611 memory = get_attr_memory (insn);
12613 /* The esp dependency is resolved before the instruction is really finished.  */
12615 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
12616 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
12619 /* INT->FP conversion is expensive. */
12620 if (get_attr_fp_int_src (dep_insn))
12623 /* Show the ability of the reorder buffer to hide the latency of a load by
12624 executing it in parallel with the previous instruction when the
12625 previous instruction is not needed to compute the address.  */
12626 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12627 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12629 /* Claim moves to take one cycle, as the core can issue one load
12630 at a time and the next load can start a cycle later.  */
12631 if (dep_insn_type == TYPE_IMOV
12632 || dep_insn_type == TYPE_FMOV)
12641 case PROCESSOR_ATHLON:
12643 memory = get_attr_memory (insn);
12645 /* Show the ability of the reorder buffer to hide the latency of a load by
12646 executing it in parallel with the previous instruction when the
12647 previous instruction is not needed to compute the address.  */
12648 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12649 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12651 enum attr_unit unit = get_attr_unit (insn);
12654 /* Because of the difference between the length of integer and
12655 floating unit pipeline preparation stages, the memory operands
12656 for floating point are cheaper.
12658 ??? For Athlon the difference is most probably 2.  */
12659 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
12662 loadcost = TARGET_ATHLON ? 2 : 0;
12664 if (cost >= loadcost)
12677 /* How many alternative schedules to try. This should be as wide as the
12678 scheduling freedom in the DFA, but no wider. Making this value too
12679 large results in extra work for the scheduler.  */
12682 ia32_multipass_dfa_lookahead (void)
12684 if (ix86_tune == PROCESSOR_PENTIUM)
12687 if (ix86_tune == PROCESSOR_PENTIUMPRO
12688 || ix86_tune == PROCESSOR_K6)
12696 /* Compute the alignment given to a constant that is being placed in memory.
12697 EXP is the constant and ALIGN is the alignment that the object would ordinarily have.
12699 The value of this function is used instead of that alignment to align the object.  */
12703 ix86_constant_alignment (tree exp, int align)
12705 if (TREE_CODE (exp) == REAL_CST)
12707 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
12709 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
12712 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
12713 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
12714 return BITS_PER_WORD;
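/* For example, a DFmode constant gets 64-bit alignment so it can be
   loaded in one aligned access, and a long string constant is promoted
   to word alignment, which helps the inline string instructions emitted
   elsewhere in this file.  */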
12719 /* Compute the alignment for a static variable.
12720 TYPE is the data type, and ALIGN is the alignment that
12721 the object would ordinarily have. The value of this function is used
12722 instead of that alignment to align the object. */
12725 ix86_data_alignment (tree type, int align)
12727 if (AGGREGATE_TYPE_P (type)
12728 && TYPE_SIZE (type)
12729 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12730 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
12731 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
12734 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
12735 to a 16-byte boundary.  */
12738 if (AGGREGATE_TYPE_P (type)
12739 && TYPE_SIZE (type)
12740 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12741 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
12742 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12746 if (TREE_CODE (type) == ARRAY_TYPE)
12748 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12750 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12753 else if (TREE_CODE (type) == COMPLEX_TYPE)
12756 if (TYPE_MODE (type) == DCmode && align < 64)
12758 if (TYPE_MODE (type) == XCmode && align < 128)
12761 else if ((TREE_CODE (type) == RECORD_TYPE
12762 || TREE_CODE (type) == UNION_TYPE
12763 || TREE_CODE (type) == QUAL_UNION_TYPE)
12764 && TYPE_FIELDS (type))
12766 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12768 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12771 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12772 || TREE_CODE (type) == INTEGER_TYPE)
12774 if (TYPE_MODE (type) == DFmode && align < 64)
12776 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12783 /* Compute the alignment for a local variable.
12784 TYPE is the data type, and ALIGN is the alignment that
12785 the object would ordinarily have. The value of this macro is used
12786 instead of that alignment to align the object. */
12789 ix86_local_alignment (tree type, int align)
12791 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
12792 to a 16-byte boundary.  */
12795 if (AGGREGATE_TYPE_P (type)
12796 && TYPE_SIZE (type)
12797 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12798 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
12799 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12802 if (TREE_CODE (type) == ARRAY_TYPE)
12804 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12806 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12809 else if (TREE_CODE (type) == COMPLEX_TYPE)
12811 if (TYPE_MODE (type) == DCmode && align < 64)
12813 if (TYPE_MODE (type) == XCmode && align < 128)
12816 else if ((TREE_CODE (type) == RECORD_TYPE
12817 || TREE_CODE (type) == UNION_TYPE
12818 || TREE_CODE (type) == QUAL_UNION_TYPE)
12819 && TYPE_FIELDS (type))
12821 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12823 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12826 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12827 || TREE_CODE (type) == INTEGER_TYPE)
12830 if (TYPE_MODE (type) == DFmode && align < 64)
12832 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12838 /* Emit RTL insns to initialize the variable parts of a trampoline.
12839 FNADDR is an RTX for the address of the function's pure code.
12840 CXT is an RTX for the static chain value for the function. */
12842 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
12846 /* Compute offset from the end of the jmp to the target function. */
12847 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
12848 plus_constant (tramp, 10),
12849 NULL_RTX, 1, OPTAB_DIRECT);
12850 emit_move_insn (gen_rtx_MEM (QImode, tramp),
12851 gen_int_mode (0xb9, QImode));
12852 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
12853 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
12854 gen_int_mode (0xe9, QImode));
12855 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
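/* Illustrative layout of the resulting 10-byte 32-bit trampoline:
     offset 0: b9 <cxt:4>    movl $CXT, %ecx
     offset 5: e9 <disp:4>   jmp  FNADDR
   where disp == FNADDR - (TRAMP + 10), relative to the byte following
   the jmp, exactly as computed above.  */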
12860 /* Try to load address using shorter movl instead of movabs.
12861 We may want to support movq for kernel mode, but the kernel does not use
12862 trampolines at the moment. */
12863 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
12865 fnaddr = copy_to_mode_reg (DImode, fnaddr);
12866 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12867 gen_int_mode (0xbb41, HImode));
12868 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
12869 gen_lowpart (SImode, fnaddr));
12874 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12875 gen_int_mode (0xbb49, HImode));
12876 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12880 /* Load static chain using movabs to r10. */
12881 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12882 gen_int_mode (0xba49, HImode));
12883 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12886 /* Jump to r11.  */
12887 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12888 gen_int_mode (0xff49, HImode));
12889 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
12890 gen_int_mode (0xe3, QImode));
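/* Illustrative layout of the 64-bit trampoline on the movabs path
   (HImode stores are little-endian, so 0xbb49 emits the bytes 49 bb):
     49 bb <fnaddr:8>   movabs $FNADDR, %r11
     49 ba <cxt:8>      movabs $CXT, %r10
     49 ff e3           jmp *%r11
   23 bytes in all, within TRAMPOLINE_SIZE.  */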
12892 gcc_assert (offset <= TRAMPOLINE_SIZE);
12895 #ifdef ENABLE_EXECUTE_STACK
12896 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
12897 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
12901 /* Codes for all the SSE/MMX builtins. */
12904 IX86_BUILTIN_ADDPS,
12905 IX86_BUILTIN_ADDSS,
12906 IX86_BUILTIN_DIVPS,
12907 IX86_BUILTIN_DIVSS,
12908 IX86_BUILTIN_MULPS,
12909 IX86_BUILTIN_MULSS,
12910 IX86_BUILTIN_SUBPS,
12911 IX86_BUILTIN_SUBSS,
12913 IX86_BUILTIN_CMPEQPS,
12914 IX86_BUILTIN_CMPLTPS,
12915 IX86_BUILTIN_CMPLEPS,
12916 IX86_BUILTIN_CMPGTPS,
12917 IX86_BUILTIN_CMPGEPS,
12918 IX86_BUILTIN_CMPNEQPS,
12919 IX86_BUILTIN_CMPNLTPS,
12920 IX86_BUILTIN_CMPNLEPS,
12921 IX86_BUILTIN_CMPNGTPS,
12922 IX86_BUILTIN_CMPNGEPS,
12923 IX86_BUILTIN_CMPORDPS,
12924 IX86_BUILTIN_CMPUNORDPS,
12925 IX86_BUILTIN_CMPNEPS,
12926 IX86_BUILTIN_CMPEQSS,
12927 IX86_BUILTIN_CMPLTSS,
12928 IX86_BUILTIN_CMPLESS,
12929 IX86_BUILTIN_CMPNEQSS,
12930 IX86_BUILTIN_CMPNLTSS,
12931 IX86_BUILTIN_CMPNLESS,
12932 IX86_BUILTIN_CMPNGTSS,
12933 IX86_BUILTIN_CMPNGESS,
12934 IX86_BUILTIN_CMPORDSS,
12935 IX86_BUILTIN_CMPUNORDSS,
12936 IX86_BUILTIN_CMPNESS,
12938 IX86_BUILTIN_COMIEQSS,
12939 IX86_BUILTIN_COMILTSS,
12940 IX86_BUILTIN_COMILESS,
12941 IX86_BUILTIN_COMIGTSS,
12942 IX86_BUILTIN_COMIGESS,
12943 IX86_BUILTIN_COMINEQSS,
12944 IX86_BUILTIN_UCOMIEQSS,
12945 IX86_BUILTIN_UCOMILTSS,
12946 IX86_BUILTIN_UCOMILESS,
12947 IX86_BUILTIN_UCOMIGTSS,
12948 IX86_BUILTIN_UCOMIGESS,
12949 IX86_BUILTIN_UCOMINEQSS,
12951 IX86_BUILTIN_CVTPI2PS,
12952 IX86_BUILTIN_CVTPS2PI,
12953 IX86_BUILTIN_CVTSI2SS,
12954 IX86_BUILTIN_CVTSI642SS,
12955 IX86_BUILTIN_CVTSS2SI,
12956 IX86_BUILTIN_CVTSS2SI64,
12957 IX86_BUILTIN_CVTTPS2PI,
12958 IX86_BUILTIN_CVTTSS2SI,
12959 IX86_BUILTIN_CVTTSS2SI64,
12961 IX86_BUILTIN_MAXPS,
12962 IX86_BUILTIN_MAXSS,
12963 IX86_BUILTIN_MINPS,
12964 IX86_BUILTIN_MINSS,
12966 IX86_BUILTIN_LOADUPS,
12967 IX86_BUILTIN_STOREUPS,
12968 IX86_BUILTIN_MOVSS,
12970 IX86_BUILTIN_MOVHLPS,
12971 IX86_BUILTIN_MOVLHPS,
12972 IX86_BUILTIN_LOADHPS,
12973 IX86_BUILTIN_LOADLPS,
12974 IX86_BUILTIN_STOREHPS,
12975 IX86_BUILTIN_STORELPS,
12977 IX86_BUILTIN_MASKMOVQ,
12978 IX86_BUILTIN_MOVMSKPS,
12979 IX86_BUILTIN_PMOVMSKB,
12981 IX86_BUILTIN_MOVNTPS,
12982 IX86_BUILTIN_MOVNTQ,
12984 IX86_BUILTIN_LOADDQU,
12985 IX86_BUILTIN_STOREDQU,
12987 IX86_BUILTIN_PACKSSWB,
12988 IX86_BUILTIN_PACKSSDW,
12989 IX86_BUILTIN_PACKUSWB,
12991 IX86_BUILTIN_PADDB,
12992 IX86_BUILTIN_PADDW,
12993 IX86_BUILTIN_PADDD,
12994 IX86_BUILTIN_PADDQ,
12995 IX86_BUILTIN_PADDSB,
12996 IX86_BUILTIN_PADDSW,
12997 IX86_BUILTIN_PADDUSB,
12998 IX86_BUILTIN_PADDUSW,
12999 IX86_BUILTIN_PSUBB,
13000 IX86_BUILTIN_PSUBW,
13001 IX86_BUILTIN_PSUBD,
13002 IX86_BUILTIN_PSUBQ,
13003 IX86_BUILTIN_PSUBSB,
13004 IX86_BUILTIN_PSUBSW,
13005 IX86_BUILTIN_PSUBUSB,
13006 IX86_BUILTIN_PSUBUSW,
13009 IX86_BUILTIN_PANDN,
13013 IX86_BUILTIN_PAVGB,
13014 IX86_BUILTIN_PAVGW,
13016 IX86_BUILTIN_PCMPEQB,
13017 IX86_BUILTIN_PCMPEQW,
13018 IX86_BUILTIN_PCMPEQD,
13019 IX86_BUILTIN_PCMPGTB,
13020 IX86_BUILTIN_PCMPGTW,
13021 IX86_BUILTIN_PCMPGTD,
13023 IX86_BUILTIN_PMADDWD,
13025 IX86_BUILTIN_PMAXSW,
13026 IX86_BUILTIN_PMAXUB,
13027 IX86_BUILTIN_PMINSW,
13028 IX86_BUILTIN_PMINUB,
13030 IX86_BUILTIN_PMULHUW,
13031 IX86_BUILTIN_PMULHW,
13032 IX86_BUILTIN_PMULLW,
13034 IX86_BUILTIN_PSADBW,
13035 IX86_BUILTIN_PSHUFW,
13037 IX86_BUILTIN_PSLLW,
13038 IX86_BUILTIN_PSLLD,
13039 IX86_BUILTIN_PSLLQ,
13040 IX86_BUILTIN_PSRAW,
13041 IX86_BUILTIN_PSRAD,
13042 IX86_BUILTIN_PSRLW,
13043 IX86_BUILTIN_PSRLD,
13044 IX86_BUILTIN_PSRLQ,
13045 IX86_BUILTIN_PSLLWI,
13046 IX86_BUILTIN_PSLLDI,
13047 IX86_BUILTIN_PSLLQI,
13048 IX86_BUILTIN_PSRAWI,
13049 IX86_BUILTIN_PSRADI,
13050 IX86_BUILTIN_PSRLWI,
13051 IX86_BUILTIN_PSRLDI,
13052 IX86_BUILTIN_PSRLQI,
13054 IX86_BUILTIN_PUNPCKHBW,
13055 IX86_BUILTIN_PUNPCKHWD,
13056 IX86_BUILTIN_PUNPCKHDQ,
13057 IX86_BUILTIN_PUNPCKLBW,
13058 IX86_BUILTIN_PUNPCKLWD,
13059 IX86_BUILTIN_PUNPCKLDQ,
13061 IX86_BUILTIN_SHUFPS,
13063 IX86_BUILTIN_RCPPS,
13064 IX86_BUILTIN_RCPSS,
13065 IX86_BUILTIN_RSQRTPS,
13066 IX86_BUILTIN_RSQRTSS,
13067 IX86_BUILTIN_SQRTPS,
13068 IX86_BUILTIN_SQRTSS,
13070 IX86_BUILTIN_UNPCKHPS,
13071 IX86_BUILTIN_UNPCKLPS,
13073 IX86_BUILTIN_ANDPS,
13074 IX86_BUILTIN_ANDNPS,
13076 IX86_BUILTIN_XORPS,
13079 IX86_BUILTIN_LDMXCSR,
13080 IX86_BUILTIN_STMXCSR,
13081 IX86_BUILTIN_SFENCE,
13083 /* 3DNow! Original */
13084 IX86_BUILTIN_FEMMS,
13085 IX86_BUILTIN_PAVGUSB,
13086 IX86_BUILTIN_PF2ID,
13087 IX86_BUILTIN_PFACC,
13088 IX86_BUILTIN_PFADD,
13089 IX86_BUILTIN_PFCMPEQ,
13090 IX86_BUILTIN_PFCMPGE,
13091 IX86_BUILTIN_PFCMPGT,
13092 IX86_BUILTIN_PFMAX,
13093 IX86_BUILTIN_PFMIN,
13094 IX86_BUILTIN_PFMUL,
13095 IX86_BUILTIN_PFRCP,
13096 IX86_BUILTIN_PFRCPIT1,
13097 IX86_BUILTIN_PFRCPIT2,
13098 IX86_BUILTIN_PFRSQIT1,
13099 IX86_BUILTIN_PFRSQRT,
13100 IX86_BUILTIN_PFSUB,
13101 IX86_BUILTIN_PFSUBR,
13102 IX86_BUILTIN_PI2FD,
13103 IX86_BUILTIN_PMULHRW,
13105 /* 3DNow! Athlon Extensions */
13106 IX86_BUILTIN_PF2IW,
13107 IX86_BUILTIN_PFNACC,
13108 IX86_BUILTIN_PFPNACC,
13109 IX86_BUILTIN_PI2FW,
13110 IX86_BUILTIN_PSWAPDSI,
13111 IX86_BUILTIN_PSWAPDSF,
13114 IX86_BUILTIN_ADDPD,
13115 IX86_BUILTIN_ADDSD,
13116 IX86_BUILTIN_DIVPD,
13117 IX86_BUILTIN_DIVSD,
13118 IX86_BUILTIN_MULPD,
13119 IX86_BUILTIN_MULSD,
13120 IX86_BUILTIN_SUBPD,
13121 IX86_BUILTIN_SUBSD,
13123 IX86_BUILTIN_CMPEQPD,
13124 IX86_BUILTIN_CMPLTPD,
13125 IX86_BUILTIN_CMPLEPD,
13126 IX86_BUILTIN_CMPGTPD,
13127 IX86_BUILTIN_CMPGEPD,
13128 IX86_BUILTIN_CMPNEQPD,
13129 IX86_BUILTIN_CMPNLTPD,
13130 IX86_BUILTIN_CMPNLEPD,
13131 IX86_BUILTIN_CMPNGTPD,
13132 IX86_BUILTIN_CMPNGEPD,
13133 IX86_BUILTIN_CMPORDPD,
13134 IX86_BUILTIN_CMPUNORDPD,
13135 IX86_BUILTIN_CMPNEPD,
13136 IX86_BUILTIN_CMPEQSD,
13137 IX86_BUILTIN_CMPLTSD,
13138 IX86_BUILTIN_CMPLESD,
13139 IX86_BUILTIN_CMPNEQSD,
13140 IX86_BUILTIN_CMPNLTSD,
13141 IX86_BUILTIN_CMPNLESD,
13142 IX86_BUILTIN_CMPORDSD,
13143 IX86_BUILTIN_CMPUNORDSD,
13144 IX86_BUILTIN_CMPNESD,
13146 IX86_BUILTIN_COMIEQSD,
13147 IX86_BUILTIN_COMILTSD,
13148 IX86_BUILTIN_COMILESD,
13149 IX86_BUILTIN_COMIGTSD,
13150 IX86_BUILTIN_COMIGESD,
13151 IX86_BUILTIN_COMINEQSD,
13152 IX86_BUILTIN_UCOMIEQSD,
13153 IX86_BUILTIN_UCOMILTSD,
13154 IX86_BUILTIN_UCOMILESD,
13155 IX86_BUILTIN_UCOMIGTSD,
13156 IX86_BUILTIN_UCOMIGESD,
13157 IX86_BUILTIN_UCOMINEQSD,
13159 IX86_BUILTIN_MAXPD,
13160 IX86_BUILTIN_MAXSD,
13161 IX86_BUILTIN_MINPD,
13162 IX86_BUILTIN_MINSD,
13164 IX86_BUILTIN_ANDPD,
13165 IX86_BUILTIN_ANDNPD,
13167 IX86_BUILTIN_XORPD,
13169 IX86_BUILTIN_SQRTPD,
13170 IX86_BUILTIN_SQRTSD,
13172 IX86_BUILTIN_UNPCKHPD,
13173 IX86_BUILTIN_UNPCKLPD,
13175 IX86_BUILTIN_SHUFPD,
13177 IX86_BUILTIN_LOADUPD,
13178 IX86_BUILTIN_STOREUPD,
13179 IX86_BUILTIN_MOVSD,
13181 IX86_BUILTIN_LOADHPD,
13182 IX86_BUILTIN_LOADLPD,
13184 IX86_BUILTIN_CVTDQ2PD,
13185 IX86_BUILTIN_CVTDQ2PS,
13187 IX86_BUILTIN_CVTPD2DQ,
13188 IX86_BUILTIN_CVTPD2PI,
13189 IX86_BUILTIN_CVTPD2PS,
13190 IX86_BUILTIN_CVTTPD2DQ,
13191 IX86_BUILTIN_CVTTPD2PI,
13193 IX86_BUILTIN_CVTPI2PD,
13194 IX86_BUILTIN_CVTSI2SD,
13195 IX86_BUILTIN_CVTSI642SD,
13197 IX86_BUILTIN_CVTSD2SI,
13198 IX86_BUILTIN_CVTSD2SI64,
13199 IX86_BUILTIN_CVTSD2SS,
13200 IX86_BUILTIN_CVTSS2SD,
13201 IX86_BUILTIN_CVTTSD2SI,
13202 IX86_BUILTIN_CVTTSD2SI64,
13204 IX86_BUILTIN_CVTPS2DQ,
13205 IX86_BUILTIN_CVTPS2PD,
13206 IX86_BUILTIN_CVTTPS2DQ,
13208 IX86_BUILTIN_MOVNTI,
13209 IX86_BUILTIN_MOVNTPD,
13210 IX86_BUILTIN_MOVNTDQ,
13213 IX86_BUILTIN_MASKMOVDQU,
13214 IX86_BUILTIN_MOVMSKPD,
13215 IX86_BUILTIN_PMOVMSKB128,
13217 IX86_BUILTIN_PACKSSWB128,
13218 IX86_BUILTIN_PACKSSDW128,
13219 IX86_BUILTIN_PACKUSWB128,
13221 IX86_BUILTIN_PADDB128,
13222 IX86_BUILTIN_PADDW128,
13223 IX86_BUILTIN_PADDD128,
13224 IX86_BUILTIN_PADDQ128,
13225 IX86_BUILTIN_PADDSB128,
13226 IX86_BUILTIN_PADDSW128,
13227 IX86_BUILTIN_PADDUSB128,
13228 IX86_BUILTIN_PADDUSW128,
13229 IX86_BUILTIN_PSUBB128,
13230 IX86_BUILTIN_PSUBW128,
13231 IX86_BUILTIN_PSUBD128,
13232 IX86_BUILTIN_PSUBQ128,
13233 IX86_BUILTIN_PSUBSB128,
13234 IX86_BUILTIN_PSUBSW128,
13235 IX86_BUILTIN_PSUBUSB128,
13236 IX86_BUILTIN_PSUBUSW128,
13238 IX86_BUILTIN_PAND128,
13239 IX86_BUILTIN_PANDN128,
13240 IX86_BUILTIN_POR128,
13241 IX86_BUILTIN_PXOR128,
13243 IX86_BUILTIN_PAVGB128,
13244 IX86_BUILTIN_PAVGW128,
13246 IX86_BUILTIN_PCMPEQB128,
13247 IX86_BUILTIN_PCMPEQW128,
13248 IX86_BUILTIN_PCMPEQD128,
13249 IX86_BUILTIN_PCMPGTB128,
13250 IX86_BUILTIN_PCMPGTW128,
13251 IX86_BUILTIN_PCMPGTD128,
13253 IX86_BUILTIN_PMADDWD128,
13255 IX86_BUILTIN_PMAXSW128,
13256 IX86_BUILTIN_PMAXUB128,
13257 IX86_BUILTIN_PMINSW128,
13258 IX86_BUILTIN_PMINUB128,
13260 IX86_BUILTIN_PMULUDQ,
13261 IX86_BUILTIN_PMULUDQ128,
13262 IX86_BUILTIN_PMULHUW128,
13263 IX86_BUILTIN_PMULHW128,
13264 IX86_BUILTIN_PMULLW128,
13266 IX86_BUILTIN_PSADBW128,
13267 IX86_BUILTIN_PSHUFHW,
13268 IX86_BUILTIN_PSHUFLW,
13269 IX86_BUILTIN_PSHUFD,
13271 IX86_BUILTIN_PSLLW128,
13272 IX86_BUILTIN_PSLLD128,
13273 IX86_BUILTIN_PSLLQ128,
13274 IX86_BUILTIN_PSRAW128,
13275 IX86_BUILTIN_PSRAD128,
13276 IX86_BUILTIN_PSRLW128,
13277 IX86_BUILTIN_PSRLD128,
13278 IX86_BUILTIN_PSRLQ128,
13279 IX86_BUILTIN_PSLLDQI128,
13280 IX86_BUILTIN_PSLLWI128,
13281 IX86_BUILTIN_PSLLDI128,
13282 IX86_BUILTIN_PSLLQI128,
13283 IX86_BUILTIN_PSRAWI128,
13284 IX86_BUILTIN_PSRADI128,
13285 IX86_BUILTIN_PSRLDQI128,
13286 IX86_BUILTIN_PSRLWI128,
13287 IX86_BUILTIN_PSRLDI128,
13288 IX86_BUILTIN_PSRLQI128,
13290 IX86_BUILTIN_PUNPCKHBW128,
13291 IX86_BUILTIN_PUNPCKHWD128,
13292 IX86_BUILTIN_PUNPCKHDQ128,
13293 IX86_BUILTIN_PUNPCKHQDQ128,
13294 IX86_BUILTIN_PUNPCKLBW128,
13295 IX86_BUILTIN_PUNPCKLWD128,
13296 IX86_BUILTIN_PUNPCKLDQ128,
13297 IX86_BUILTIN_PUNPCKLQDQ128,
13299 IX86_BUILTIN_CLFLUSH,
13300 IX86_BUILTIN_MFENCE,
13301 IX86_BUILTIN_LFENCE,
13303 /* Prescott New Instructions. */
13304 IX86_BUILTIN_ADDSUBPS,
13305 IX86_BUILTIN_HADDPS,
13306 IX86_BUILTIN_HSUBPS,
13307 IX86_BUILTIN_MOVSHDUP,
13308 IX86_BUILTIN_MOVSLDUP,
13309 IX86_BUILTIN_ADDSUBPD,
13310 IX86_BUILTIN_HADDPD,
13311 IX86_BUILTIN_HSUBPD,
13312 IX86_BUILTIN_LDDQU,
13314 IX86_BUILTIN_MONITOR,
13315 IX86_BUILTIN_MWAIT,
13317 IX86_BUILTIN_VEC_INIT_V2SI,
13318 IX86_BUILTIN_VEC_INIT_V4HI,
13319 IX86_BUILTIN_VEC_INIT_V8QI,
13320 IX86_BUILTIN_VEC_EXT_V2DF,
13321 IX86_BUILTIN_VEC_EXT_V2DI,
13322 IX86_BUILTIN_VEC_EXT_V4SF,
13323 IX86_BUILTIN_VEC_EXT_V4SI,
13324 IX86_BUILTIN_VEC_EXT_V8HI,
13325 IX86_BUILTIN_VEC_EXT_V2SI,
13326 IX86_BUILTIN_VEC_EXT_V4HI,
13327 IX86_BUILTIN_VEC_SET_V8HI,
13328 IX86_BUILTIN_VEC_SET_V4HI,
13333 #define def_builtin(MASK, NAME, TYPE, CODE) \
13335 if ((MASK) & target_flags \
13336 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
13337 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
13338 NULL, NULL_TREE); \
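#if 0
/* Illustrative usage sketch, not part of the build: registering one SSE
   builtin through the macro above.  The v4sf_ftype_v4sf_v4sf type node
   is assumed to be the (v4sf, v4sf) -> v4sf type built elsewhere in
   this file.  */
def_builtin (MASK_SSE, "__builtin_ia32_addps",
             v4sf_ftype_v4sf_v4sf, IX86_BUILTIN_ADDPS);
#endif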
13341 /* Bits for builtin_description.flag. */
13343 /* Set when we don't support the comparison natively, and should
13344 swap the comparison operands in order to support it.  */
13345 #define BUILTIN_DESC_SWAP_OPERANDS 1
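/* For instance, __builtin_ia32_cmpgtps in the table below is implemented
   with the LT comparison plus BUILTIN_DESC_SWAP_OPERANDS, computing
   a > b as b < a.  */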
13347 struct builtin_description
13349 const unsigned int mask;
13350 const enum insn_code icode;
13351 const char *const name;
13352 const enum ix86_builtins code;
13353 const enum rtx_code comparison;
13354 const unsigned int flag;
13357 static const struct builtin_description bdesc_comi[] =
13359 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
13360 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
13361 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
13362 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
13363 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
13364 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
13365 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
13366 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
13367 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
13368 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
13369 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
13370 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
13371 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
13372 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
13373 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
13374 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
13375 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
13376 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
13377 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
13378 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
13379 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
13380 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
13381 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
13382 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
13385 static const struct builtin_description bdesc_2arg[] =
13388 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
  { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
  { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
  { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },

  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },

  { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
  { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },

  { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
  { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
  { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },

  { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },

  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },

  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },

  { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
  { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },

  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },

  { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },

  { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },

  { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },

  { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
  { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
  { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
  { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },

  { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },

  { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },

  { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
  { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
  { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },

  { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },

  { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
  { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
  { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },

  { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
  { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
  { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },

  { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
  { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
  { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },

  { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
};
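
/* Illustrative sketch (hypothetical user-side code, not from this file or
   from xmmintrin.h verbatim): the BUILTIN_DESC_SWAP_OPERANDS entries above
   describe compares with no direct SSE encoding.  CMPGTPS, for example, is
   listed with comparison code LT plus the swap flag, so the expander emits
   the LT compare with its operands exchanged.  */
#if 0
#include <xmmintrin.h>

static __m128
greater_mask (__m128 a, __m128 b)
{
  /* _mm_cmpgt_ps is built on __builtin_ia32_cmpgtps; per the table above
     it becomes a CMPLTPS with a and b swapped, i.e. "b < a".  */
  return _mm_cmpgt_ps (a, b);
}
#endif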

static const struct builtin_description bdesc_1arg[] =
{
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },

  { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },

  { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
  { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
  { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },

  { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
  { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
  { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },

  { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 }
};
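
/* Illustrative sketch (hypothetical user-side code, not part of this file):
   bdesc_1arg entries expand through ix86_expand_unop_builtin below.
   MOVMSKPS, for instance, maps a V4SF input to an int whose low four bits
   are the sign bits of the four lanes.  */
#if 0
#include <xmmintrin.h>

static int
any_sign_bit_set (__m128 x)
{
  /* _mm_movemask_ps wraps __builtin_ia32_movmskps (CODE_FOR_sse_movmskps);
     a nonzero result means some lane has its sign bit set.  */
  return _mm_movemask_ps (x) != 0;
}
#endif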

static void
ix86_init_builtins (void)
{
  if (TARGET_MMX)
    ix86_init_mmx_sse_builtins ();
}

/* Set up all the MMX/SSE builtins.  This is not called if TARGET_MMX
   is zero.  Otherwise, if TARGET_SSE is not set, only expand the MMX
   builtins.  */
static void
ix86_init_mmx_sse_builtins (void)
{
  const struct builtin_description * d;
  size_t i;
  tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
  tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
  tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
  tree V2DI_type_node
    = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
  tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
  tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
  tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
  tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
  tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
  tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);

  tree pchar_type_node = build_pointer_type (char_type_node);
  tree pcchar_type_node = build_pointer_type (
      build_type_variant (char_type_node, 1, 0));
  tree pfloat_type_node = build_pointer_type (float_type_node);
  tree pcfloat_type_node = build_pointer_type (
      build_type_variant (float_type_node, 1, 0));
  tree pv2si_type_node = build_pointer_type (V2SI_type_node);
  tree pv2di_type_node = build_pointer_type (V2DI_type_node);
  tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);

  tree int_ftype_v4sf_v4sf
    = build_function_type_list (integer_type_node,
                                V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v4si_ftype_v4sf_v4sf
    = build_function_type_list (V4SI_type_node,
                                V4SF_type_node, V4SF_type_node, NULL_TREE);
  /* MMX/SSE/integer conversions.  */
  tree int_ftype_v4sf
    = build_function_type_list (integer_type_node,
                                V4SF_type_node, NULL_TREE);
  tree int64_ftype_v4sf
    = build_function_type_list (long_long_integer_type_node,
                                V4SF_type_node, NULL_TREE);
  tree int_ftype_v8qi
    = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_int
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, integer_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_int64
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, long_long_integer_type_node,
                                NULL_TREE);
  tree v4sf_ftype_v4sf_v2si
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, V2SI_type_node, NULL_TREE);

  /* Miscellaneous.  */
  tree v8qi_ftype_v4hi_v4hi
    = build_function_type_list (V8QI_type_node,
                                V4HI_type_node, V4HI_type_node, NULL_TREE);
  tree v4hi_ftype_v2si_v2si
    = build_function_type_list (V4HI_type_node,
                                V2SI_type_node, V2SI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_v4sf_int
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, V4SF_type_node,
                                integer_type_node, NULL_TREE);
  tree v2si_ftype_v4hi_v4hi
    = build_function_type_list (V2SI_type_node,
                                V4HI_type_node, V4HI_type_node, NULL_TREE);
  tree v4hi_ftype_v4hi_int
    = build_function_type_list (V4HI_type_node,
                                V4HI_type_node, integer_type_node, NULL_TREE);
  tree v4hi_ftype_v4hi_di
    = build_function_type_list (V4HI_type_node,
                                V4HI_type_node, long_long_unsigned_type_node,
                                NULL_TREE);
  tree v2si_ftype_v2si_di
    = build_function_type_list (V2SI_type_node,
                                V2SI_type_node, long_long_unsigned_type_node,
                                NULL_TREE);
  tree void_ftype_void
    = build_function_type (void_type_node, void_list_node);
  tree void_ftype_unsigned
    = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
  tree void_ftype_unsigned_unsigned
    = build_function_type_list (void_type_node, unsigned_type_node,
                                unsigned_type_node, NULL_TREE);
  tree void_ftype_pcvoid_unsigned_unsigned
    = build_function_type_list (void_type_node, const_ptr_type_node,
                                unsigned_type_node, unsigned_type_node,
                                NULL_TREE);
  tree unsigned_ftype_void
    = build_function_type (unsigned_type_node, void_list_node);
  tree v2si_ftype_v4sf
    = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
  /* Loads/stores.  */
  tree void_ftype_v8qi_v8qi_pchar
    = build_function_type_list (void_type_node,
                                V8QI_type_node, V8QI_type_node,
                                pchar_type_node, NULL_TREE);
  tree v4sf_ftype_pcfloat
    = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
  /* @@@ the type is bogus */
  tree v4sf_ftype_v4sf_pv2si
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, pv2si_type_node, NULL_TREE);
  tree void_ftype_pv2si_v4sf
    = build_function_type_list (void_type_node,
                                pv2si_type_node, V4SF_type_node, NULL_TREE);
  tree void_ftype_pfloat_v4sf
    = build_function_type_list (void_type_node,
                                pfloat_type_node, V4SF_type_node, NULL_TREE);
  tree void_ftype_pdi_di
    = build_function_type_list (void_type_node,
                                pdi_type_node, long_long_unsigned_type_node,
                                NULL_TREE);
  tree void_ftype_pv2di_v2di
    = build_function_type_list (void_type_node,
                                pv2di_type_node, V2DI_type_node, NULL_TREE);
  /* Normal vector unops.  */
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);

  /* Normal vector binops.  */
  tree v4sf_ftype_v4sf_v4sf
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v8qi_ftype_v8qi_v8qi
    = build_function_type_list (V8QI_type_node,
                                V8QI_type_node, V8QI_type_node, NULL_TREE);
  tree v4hi_ftype_v4hi_v4hi
    = build_function_type_list (V4HI_type_node,
                                V4HI_type_node, V4HI_type_node, NULL_TREE);
  tree v2si_ftype_v2si_v2si
    = build_function_type_list (V2SI_type_node,
                                V2SI_type_node, V2SI_type_node, NULL_TREE);
  tree di_ftype_di_di
    = build_function_type_list (long_long_unsigned_type_node,
                                long_long_unsigned_type_node,
                                long_long_unsigned_type_node, NULL_TREE);

  tree v2si_ftype_v2sf
    = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
  tree v2sf_ftype_v2si
    = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
  tree v2si_ftype_v2si
    = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
  tree v2sf_ftype_v2sf
    = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
  tree v2sf_ftype_v2sf_v2sf
    = build_function_type_list (V2SF_type_node,
                                V2SF_type_node, V2SF_type_node, NULL_TREE);
  tree v2si_ftype_v2sf_v2sf
    = build_function_type_list (V2SI_type_node,
                                V2SF_type_node, V2SF_type_node, NULL_TREE);
  tree pint_type_node = build_pointer_type (integer_type_node);
  tree pdouble_type_node = build_pointer_type (double_type_node);
  tree pcdouble_type_node = build_pointer_type (
      build_type_variant (double_type_node, 1, 0));
  tree int_ftype_v2df_v2df
    = build_function_type_list (integer_type_node,
                                V2DF_type_node, V2DF_type_node, NULL_TREE);

  tree ti_ftype_ti_ti
    = build_function_type_list (intTI_type_node,
                                intTI_type_node, intTI_type_node, NULL_TREE);
  tree void_ftype_pcvoid
    = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
  tree v4sf_ftype_v4si
    = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
  tree v4si_ftype_v4sf
    = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v4si
    = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
  tree v4si_ftype_v2df
    = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
  tree v2si_ftype_v2df
    = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
  tree v4sf_ftype_v2df
    = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
  tree v2df_ftype_v2si
    = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
  tree v2df_ftype_v4sf
    = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
  tree int_ftype_v2df
    = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
  tree int64_ftype_v2df
    = build_function_type_list (long_long_integer_type_node,
                                V2DF_type_node, NULL_TREE);
  tree v2df_ftype_v2df_int
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, integer_type_node, NULL_TREE);
  tree v2df_ftype_v2df_int64
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, long_long_integer_type_node,
                                NULL_TREE);
  tree v4sf_ftype_v4sf_v2df
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, V2DF_type_node, NULL_TREE);
  tree v2df_ftype_v2df_v4sf
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df_v2df_int
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, V2DF_type_node,
                                integer_type_node,
                                NULL_TREE);
  tree v2df_ftype_v2df_pcdouble
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, pcdouble_type_node, NULL_TREE);
  tree void_ftype_pdouble_v2df
    = build_function_type_list (void_type_node,
                                pdouble_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pint_int
    = build_function_type_list (void_type_node,
                                pint_type_node, integer_type_node, NULL_TREE);
  tree void_ftype_v16qi_v16qi_pchar
    = build_function_type_list (void_type_node,
                                V16QI_type_node, V16QI_type_node,
                                pchar_type_node, NULL_TREE);
  tree v2df_ftype_pcdouble
    = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
  tree v2df_ftype_v2df_v2df
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi_v16qi
    = build_function_type_list (V16QI_type_node,
                                V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v8hi
    = build_function_type_list (V8HI_type_node,
                                V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v4si_ftype_v4si_v4si
    = build_function_type_list (V4SI_type_node,
                                V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v2di_ftype_v2di_v2di
    = build_function_type_list (V2DI_type_node,
                                V2DI_type_node, V2DI_type_node, NULL_TREE);
  tree v2di_ftype_v2df_v2df
    = build_function_type_list (V2DI_type_node,
                                V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree v2di_ftype_v2di_int
    = build_function_type_list (V2DI_type_node,
                                V2DI_type_node, integer_type_node, NULL_TREE);
  tree v4si_ftype_v4si_int
    = build_function_type_list (V4SI_type_node,
                                V4SI_type_node, integer_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_int
    = build_function_type_list (V8HI_type_node,
                                V8HI_type_node, integer_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v2di
    = build_function_type_list (V8HI_type_node,
                                V8HI_type_node, V2DI_type_node, NULL_TREE);
  tree v4si_ftype_v4si_v2di
    = build_function_type_list (V4SI_type_node,
                                V4SI_type_node, V2DI_type_node, NULL_TREE);
  tree v4si_ftype_v8hi_v8hi
    = build_function_type_list (V4SI_type_node,
                                V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree di_ftype_v8qi_v8qi
    = build_function_type_list (long_long_unsigned_type_node,
                                V8QI_type_node, V8QI_type_node, NULL_TREE);
  tree di_ftype_v2si_v2si
    = build_function_type_list (long_long_unsigned_type_node,
                                V2SI_type_node, V2SI_type_node, NULL_TREE);
  tree v2di_ftype_v16qi_v16qi
    = build_function_type_list (V2DI_type_node,
                                V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v2di_ftype_v4si_v4si
    = build_function_type_list (V2DI_type_node,
                                V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree int_ftype_v16qi
    = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
  tree v16qi_ftype_pcchar
    = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
  tree void_ftype_pchar_v16qi
    = build_function_type_list (void_type_node,
                                pchar_type_node, V16QI_type_node, NULL_TREE);

  tree float80_type;
  tree float128_type;
  tree ftype;

  /* The __float80 type.  */
  if (TYPE_MODE (long_double_type_node) == XFmode)
    (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                               "__float80");
  else
    {
      /* The __float80 type.  */
      float80_type = make_node (REAL_TYPE);
      TYPE_PRECISION (float80_type) = 80;
      layout_type (float80_type);
      (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
    }

  float128_type = make_node (REAL_TYPE);
  TYPE_PRECISION (float128_type) = 128;
  layout_type (float128_type);
  (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
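
/* Illustrative sketch (hypothetical user code, not part of this file): once
   the registrations above have run, __float80 and __float128 are visible to
   C sources as type names.  Which hardware format __float80 denotes depends
   on the long double check above.  */
#if 0
__float80  extended_pi = 3.141592653589793238L;  /* 80-bit extended value */
__float128 quad_scratch;                         /* 128-bit object */
#endif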

  /* Add all builtins that are more or less simple operations on two
     operands.  */
  for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      /* Use one of the operands; the target can have a different mode for
         mask-generating compares.  */
      enum machine_mode mode;
      tree type;

      if (d->name == 0)
        continue;

      mode = insn_data[d->icode].operand[1].mode;

      switch (mode)
        {
        case V16QImode:
          type = v16qi_ftype_v16qi_v16qi;
          break;
        case V8HImode:
          type = v8hi_ftype_v8hi_v8hi;
          break;
        case V4SImode:
          type = v4si_ftype_v4si_v4si;
          break;
        case V2DImode:
          type = v2di_ftype_v2di_v2di;
          break;
        case V2DFmode:
          type = v2df_ftype_v2df_v2df;
          break;
        case TImode:
          type = ti_ftype_ti_ti;
          break;
        case V4SFmode:
          type = v4sf_ftype_v4sf_v4sf;
          break;
        case V8QImode:
          type = v8qi_ftype_v8qi_v8qi;
          break;
        case V4HImode:
          type = v4hi_ftype_v4hi_v4hi;
          break;
        case V2SImode:
          type = v2si_ftype_v2si_v2si;
          break;
        case DImode:
          type = di_ftype_di_di;
          break;

        default:
          gcc_unreachable ();
        }

      /* Override for comparisons.  */
      if (d->icode == CODE_FOR_sse_maskcmpv4sf3
          || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
        type = v4si_ftype_v4sf_v4sf;

      if (d->icode == CODE_FOR_sse2_maskcmpv2df3
          || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
        type = v2di_ftype_v2df_v2df;

      def_builtin (d->mask, d->name, type, d->code);
    }
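
/* Illustrative sketch (hypothetical user code, not part of this file): the
   comparison override above gives the mask compares an integer-vector
   result type, since CMPPS/CMPPD produce all-ones or all-zero lanes rather
   than float values.  Under that assumption a caller sees, e.g.:  */
#if 0
typedef float v4sf __attribute__ ((vector_size (16)));
typedef int   v4si __attribute__ ((vector_size (16)));

static v4si
equal_mask (v4sf a, v4sf b)
{
  /* Typed v4si (v4sf, v4sf) by the override; each lane is 0 or ~0.  */
  return __builtin_ia32_cmpeqps (a, b);
}
#endif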

  /* Add the remaining MMX insns with somewhat more complicated types.  */
  def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
  def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
  def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
  def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);

  def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
  def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
  def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);

  def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
  def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);

  def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
  def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
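
/* Illustrative sketch (hypothetical user code, not part of this file): the
   MMX shift-by-count builtins take the count as a DImode value to match the
   hardware, which is why v4hi_ftype_v4hi_di is used above.  */
#if 0
typedef short v4hi __attribute__ ((vector_size (8)));

static v4hi
shift_each_halfword_left (v4hi x, long long n)
{
  /* Expands to PSLLW; every 16-bit lane is shifted left by N.  */
  return __builtin_ia32_psllw (x, n);
}
#endif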

  /* comi/ucomi insns.  */
  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    if (d->mask == MASK_SSE2)
      def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
    else
      def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);

  def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
  def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
  def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);

  def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
  def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
  def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
  def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
  def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
  def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
  def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
  def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
  def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
  def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
  def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);

  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);

  def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
  def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);

  def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
  def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
  def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
  def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);

  def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
  def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);

  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);

  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);

  def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
  def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
  def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
  def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
  def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
  def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);

  def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);

  /* Original 3DNow!  */
  def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
  def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);

  /* 3DNow! extension as used in the Athlon CPU.  */
  def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
  def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
  def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
  def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
  def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
  def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);

  def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);

  def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
  def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);

  def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
  def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);

  def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
  def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
  def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
  def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
  def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);

  def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
  def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
  def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
  def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);

  def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
  def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);

  def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);

  def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
  def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);

  def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
  def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
  def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
  def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
  def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);

  def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);

  def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
  def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
  def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
  def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);

  def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
  def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
  def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);

  def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
  def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
  def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
  def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);

  def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
  def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
  def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);

  def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
  def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);

  def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
  def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);

  def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
  def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
  def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);

  def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
  def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
  def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);

  def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
  def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);

  def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
  def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
  def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
  def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);

  def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
  def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
  def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
  def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);

  def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
  def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);

  def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);

  /* Prescott New Instructions.  */
  def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
               void_ftype_pcvoid_unsigned_unsigned,
               IX86_BUILTIN_MONITOR);
  def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
               void_ftype_unsigned_unsigned,
               IX86_BUILTIN_MWAIT);
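
/* Illustrative sketch (hypothetical user code, not part of this file): how
   the SSE3 monitor/mwait builtins registered above are called.  The
   argument types follow void_ftype_pcvoid_unsigned_unsigned and
   void_ftype_unsigned_unsigned; the zero extension/hint values are just
   placeholder assumptions.  */
#if 0
static void
wait_for_store (const void *watched_line)
{
  __builtin_ia32_monitor (watched_line, 0, 0);  /* arm the address monitor */
  __builtin_ia32_mwait (0, 0);                  /* sleep until it is written */
}
#endif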
  def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
               v4sf_ftype_v4sf,
               IX86_BUILTIN_MOVSHDUP);
  def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
               v4sf_ftype_v4sf,
               IX86_BUILTIN_MOVSLDUP);
  def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
               v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);

  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V2SI_type_node, integer_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
               ftype, IX86_BUILTIN_VEC_INIT_V2SI);

  ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node, NULL_TREE);
  def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
               ftype, IX86_BUILTIN_VEC_INIT_V4HI);

  ftype = build_function_type_list (V8QI_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, NULL_TREE);
  def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
               ftype, IX86_BUILTIN_VEC_INIT_V8QI);
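
/* Illustrative sketch (hypothetical user code, not part of this file):
   the vec_init builtins are what mmintrin.h uses to build MMX values
   without the compiler itself having to emit MMX moves, as the comment at
   the end of this section explains.  A presumed element order, lowest
   lane first, is shown below.  */
#if 0
typedef short v4hi __attribute__ ((vector_size (8)));

static v4hi
make_v4hi (short a, short b, short c, short d)
{
  /* Presumably builds the vector with A in lane 0 through D in lane 3.  */
  return __builtin_ia32_vec_init_v4hi (a, b, c, d);
}
#endif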

  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (double_type_node, V2DF_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
               ftype, IX86_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (long_long_integer_type_node,
                                    V2DI_type_node, integer_type_node,
                                    NULL_TREE);
  def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
               ftype, IX86_BUILTIN_VEC_EXT_V2DI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
               ftype, IX86_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
               ftype, IX86_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
               ftype, IX86_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
               ftype, IX86_BUILTIN_VEC_EXT_V4HI);

  ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
               ftype, IX86_BUILTIN_VEC_EXT_V2SI);
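
/* Illustrative sketch (hypothetical user code, not part of this file):
   the vec_ext builtins take the vector plus a constant selector, which
   get_element_number below range-checks at expansion time.  */
#if 0
typedef float v4sf __attribute__ ((vector_size (16)));

static float
third_element (v4sf x)
{
  /* The selector must be a compile-time constant in 0..3 for V4SF.  */
  return __builtin_ia32_vec_ext_v4sf (x, 2);
}
#endif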

  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
                                    intHI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
               ftype, IX86_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
                                    intHI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
               ftype, IX86_BUILTIN_VEC_SET_V4HI);
}

/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */
static rtx
safe_vector_operand (rtx x, enum machine_mode mode)
{
  if (x == const0_rtx)
    x = CONST0_RTX (mode);
  return x;
}

/* Subroutine of ix86_expand_builtin to take care of binop insns.  */

static rtx
ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
{
  rtx pat, xops[3];
  tree arg0 = TREE_VALUE (arglist);
  tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
  rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (GET_MODE (op1) == SImode && mode1 == TImode)
    {
      rtx x = gen_reg_rtx (V4SImode);
      emit_insn (gen_sse2_loadd (x, op1));
      op1 = gen_lowpart (TImode, x);
    }

  /* The insn must want input operands in the same modes as the
     result.  */
  gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
              && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));

  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  /* ??? Using ix86_fixup_binary_operands is problematic when
     we've got mismatched modes.  Fake it.  */

  xops[0] = target;
  xops[1] = op0;
  xops[2] = op1;

  if (tmode == mode0 && tmode == mode1)
    {
      target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
      op0 = xops[1];
      op1 = xops[2];
    }
  else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
    {
      op0 = force_reg (mode0, op0);
      op1 = force_reg (mode1, op1);
      target = gen_reg_rtx (tmode);
    }

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}

/* Subroutine of ix86_expand_builtin to take care of stores.  */

static rtx
ix86_expand_store_builtin (enum insn_code icode, tree arglist)
{
  rtx pat;
  tree arg0 = TREE_VALUE (arglist);
  tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
  rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
  enum machine_mode mode0 = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[icode].operand[1].mode;

  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (pat)
    emit_insn (pat);
  return op0;
}

/* Subroutine of ix86_expand_builtin to take care of unop insns.  */

static rtx
ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
                          rtx target, int do_load)
{
  rtx pat;
  tree arg0 = TREE_VALUE (arglist);
  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);
  if (do_load)
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  else
    {
      if (VECTOR_MODE_P (mode0))
        op0 = safe_vector_operand (op0, mode0);

      if ((optimize && !register_operand (op0, mode0))
          || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);
    }

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}

/* Subroutine of ix86_expand_builtin to take care of three special unop insns:
   sqrtss, rsqrtss, rcpss.  */

static rtx
ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
{
  rtx pat;
  tree arg0 = TREE_VALUE (arglist);
  rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (optimize || !target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if ((optimize && !register_operand (op0, mode0))
      || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  op1 = op0;
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
14528 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
14531 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
14535 tree arg0 = TREE_VALUE (arglist);
14536 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14537 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14538 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14540 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
14541 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
14542 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
14543 enum rtx_code comparison = d->comparison;
14545 if (VECTOR_MODE_P (mode0))
14546 op0 = safe_vector_operand (op0, mode0);
14547 if (VECTOR_MODE_P (mode1))
14548 op1 = safe_vector_operand (op1, mode1);
  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
14552 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
14554 rtx tmp = gen_reg_rtx (mode1);
14555 emit_move_insn (tmp, op1);
14560 if (optimize || !target
14561 || GET_MODE (target) != tmode
14562 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
14563 target = gen_reg_rtx (tmode);
14565 if ((optimize && !register_operand (op0, mode0))
14566 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
14567 op0 = copy_to_mode_reg (mode0, op0);
14568 if ((optimize && !register_operand (op1, mode1))
14569 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
14570 op1 = copy_to_mode_reg (mode1, op1);
14572 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
14573 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
14580 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
14583 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
14587 tree arg0 = TREE_VALUE (arglist);
14588 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14589 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14590 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14592 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
14593 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
14594 enum rtx_code comparison = d->comparison;
14596 if (VECTOR_MODE_P (mode0))
14597 op0 = safe_vector_operand (op0, mode0);
14598 if (VECTOR_MODE_P (mode1))
14599 op1 = safe_vector_operand (op1, mode1);
  /* Swap operands if we have a comparison that isn't available in
     hardware.  */
14603 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
14610 target = gen_reg_rtx (SImode);
14611 emit_move_insn (target, const0_rtx);
14612 target = gen_rtx_SUBREG (QImode, target, 0);
14614 if ((optimize && !register_operand (op0, mode0))
14615 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14616 op0 = copy_to_mode_reg (mode0, op0);
14617 if ((optimize && !register_operand (op1, mode1))
14618 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14619 op1 = copy_to_mode_reg (mode1, op1);
14621 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
14622 pat = GEN_FCN (d->icode) (op0, op1);
14626 emit_insn (gen_rtx_SET (VOIDmode,
14627 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
14628 gen_rtx_fmt_ee (comparison, QImode,
14632 return SUBREG_REG (target);
14635 /* Return the integer constant in ARG. Constrain it to be in the range
14636 of the subparts of VEC_TYPE; issue an error if not. */
14639 get_element_number (tree vec_type, tree arg)
14641 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14643 if (!host_integerp (arg, 1)
14644 || (elt = tree_low_cst (arg, 1), elt > max))
      error ("selector must be an integer constant in the range 0..%wi", max);
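/* For example, a V4HI vector has TYPE_VECTOR_SUBPARTS == 4, so only
   selectors 0..3 are accepted here; a call such as
   _mm_extract_pi16 (v, 5) (illustrative only, assuming the usual
   intrinsic wrappers) is diagnosed with the error above.  */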
14653 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
14654 ix86_expand_vector_init. We DO have language-level syntax for this, in
14655 the form of (type){ init-list }. Except that since we can't place emms
14656 instructions from inside the compiler, we can't allow the use of MMX
14657 registers unless the user explicitly asks for it. So we do *not* define
14658 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
   we have builtins invoked by mmintrin.h that give us license to emit
14660 these sorts of instructions. */
14663 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
14665 enum machine_mode tmode = TYPE_MODE (type);
14666 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
14667 int i, n_elt = GET_MODE_NUNITS (tmode);
14668 rtvec v = rtvec_alloc (n_elt);
14670 gcc_assert (VECTOR_MODE_P (tmode));
14672 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
14674 rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
14675 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14678 gcc_assert (arglist == NULL);
14680 if (!target || !register_operand (target, tmode))
14681 target = gen_reg_rtx (tmode);
14683 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
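/* Illustrative sketch, not part of the compiler proper: assuming the
   usual mmintrin.h wrappers, user code such as

     #include <mmintrin.h>
     __m64
     make (void)
     {
       return _mm_set_pi16 (3, 2, 1, 0);
     }

   arrives here as __builtin_ia32_vec_init_v4hi, and the loop above
   collects the four HImode elements into the PARALLEL.  */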
14687 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
14688 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
14689 had a language-level syntax for referencing vector elements. */
14692 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
14694 enum machine_mode tmode, mode0;
14699 arg0 = TREE_VALUE (arglist);
14700 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14702 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14703 elt = get_element_number (TREE_TYPE (arg0), arg1);
14705 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14706 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14707 gcc_assert (VECTOR_MODE_P (mode0));
14709 op0 = force_reg (mode0, op0);
14711 if (optimize || !target || !register_operand (target, tmode))
14712 target = gen_reg_rtx (tmode);
14714 ix86_expand_vector_extract (true, target, op0, elt);
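/* Illustrative sketch: assuming the usual xmmintrin.h wrapper,

     int
     second (__m64 v)
     {
       return _mm_extract_pi16 (v, 1);
     }

   arrives here as __builtin_ia32_vec_ext_v4hi with ELT == 1, and the
   actual extraction is emitted by ix86_expand_vector_extract.  */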
14719 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
14720 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
14721 a language-level syntax for referencing vector elements. */
14724 ix86_expand_vec_set_builtin (tree arglist)
14726 enum machine_mode tmode, mode1;
14727 tree arg0, arg1, arg2;
14731 arg0 = TREE_VALUE (arglist);
14732 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14733 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14735 tmode = TYPE_MODE (TREE_TYPE (arg0));
14736 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14737 gcc_assert (VECTOR_MODE_P (tmode));
14739 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
14740 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
14741 elt = get_element_number (TREE_TYPE (arg0), arg2);
14743 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14744 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14746 op0 = force_reg (tmode, op0);
14747 op1 = force_reg (mode1, op1);
14749 ix86_expand_vector_set (true, op0, op1, elt);
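/* Illustrative sketch: assuming the usual xmmintrin.h wrapper,

     __m64
     put (__m64 v, int d)
     {
       return _mm_insert_pi16 (v, d, 2);
     }

   arrives here as __builtin_ia32_vec_set_v4hi with ELT == 2, and
   ix86_expand_vector_set replaces the third HImode subpart of V.  */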
14754 /* Expand an expression EXP that calls a built-in function,
14755 with result going to TARGET if that's convenient
14756 (and in mode MODE if that's convenient).
14757 SUBTARGET may be used as the target for computing one of EXP's operands.
14758 IGNORE is nonzero if the value is to be ignored. */
14761 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
14762 enum machine_mode mode ATTRIBUTE_UNUSED,
14763 int ignore ATTRIBUTE_UNUSED)
14765 const struct builtin_description *d;
14767 enum insn_code icode;
14768 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
14769 tree arglist = TREE_OPERAND (exp, 1);
14770 tree arg0, arg1, arg2;
14771 rtx op0, op1, op2, pat;
14772 enum machine_mode tmode, mode0, mode1, mode2;
14773 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14777 case IX86_BUILTIN_EMMS:
14778 emit_insn (gen_mmx_emms ());
14781 case IX86_BUILTIN_SFENCE:
14782 emit_insn (gen_sse_sfence ());
14785 case IX86_BUILTIN_MASKMOVQ:
14786 case IX86_BUILTIN_MASKMOVDQU:
14787 icode = (fcode == IX86_BUILTIN_MASKMOVQ
14788 ? CODE_FOR_mmx_maskmovq
14789 : CODE_FOR_sse2_maskmovdqu);
14790 /* Note the arg order is different from the operand order. */
14791 arg1 = TREE_VALUE (arglist);
14792 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
14793 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14794 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14795 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14796 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14797 mode0 = insn_data[icode].operand[0].mode;
14798 mode1 = insn_data[icode].operand[1].mode;
14799 mode2 = insn_data[icode].operand[2].mode;
14801 op0 = force_reg (Pmode, op0);
14802 op0 = gen_rtx_MEM (mode1, op0);
14804 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14805 op0 = copy_to_mode_reg (mode0, op0);
14806 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14807 op1 = copy_to_mode_reg (mode1, op1);
14808 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
14809 op2 = copy_to_mode_reg (mode2, op2);
14810 pat = GEN_FCN (icode) (op0, op1, op2);
14816 case IX86_BUILTIN_SQRTSS:
14817 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
14818 case IX86_BUILTIN_RSQRTSS:
14819 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
14820 case IX86_BUILTIN_RCPSS:
14821 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
14823 case IX86_BUILTIN_LOADUPS:
14824 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
14826 case IX86_BUILTIN_STOREUPS:
14827 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
14829 case IX86_BUILTIN_LOADHPS:
14830 case IX86_BUILTIN_LOADLPS:
14831 case IX86_BUILTIN_LOADHPD:
14832 case IX86_BUILTIN_LOADLPD:
14833 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
14834 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
14835 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
14836 : CODE_FOR_sse2_loadlpd);
14837 arg0 = TREE_VALUE (arglist);
14838 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14839 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14840 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14841 tmode = insn_data[icode].operand[0].mode;
14842 mode0 = insn_data[icode].operand[1].mode;
14843 mode1 = insn_data[icode].operand[2].mode;
14845 op0 = force_reg (mode0, op0);
14846 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
14847 if (optimize || target == 0
14848 || GET_MODE (target) != tmode
14849 || !register_operand (target, tmode))
14850 target = gen_reg_rtx (tmode);
14851 pat = GEN_FCN (icode) (target, op0, op1);
14857 case IX86_BUILTIN_STOREHPS:
14858 case IX86_BUILTIN_STORELPS:
14859 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
14860 : CODE_FOR_sse_storelps);
14861 arg0 = TREE_VALUE (arglist);
14862 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14863 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14864 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14865 mode0 = insn_data[icode].operand[0].mode;
14866 mode1 = insn_data[icode].operand[1].mode;
14868 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14869 op1 = force_reg (mode1, op1);
14871 pat = GEN_FCN (icode) (op0, op1);
14877 case IX86_BUILTIN_MOVNTPS:
14878 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
14879 case IX86_BUILTIN_MOVNTQ:
14880 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
14882 case IX86_BUILTIN_LDMXCSR:
14883 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
14884 target = assign_386_stack_local (SImode, SLOT_TEMP);
14885 emit_move_insn (target, op0);
14886 emit_insn (gen_sse_ldmxcsr (target));
14889 case IX86_BUILTIN_STMXCSR:
14890 target = assign_386_stack_local (SImode, SLOT_TEMP);
14891 emit_insn (gen_sse_stmxcsr (target));
14892 return copy_to_mode_reg (SImode, target);
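      /* Illustrative sketch: these two cases back the usual xmmintrin.h
	 control register wrappers, e.g.

	   unsigned int csr = _mm_getcsr ();   ->  __builtin_ia32_stmxcsr
	   _mm_setcsr (csr | 0x8040);          ->  __builtin_ia32_ldmxcsr

	 Both bounce the value through the SImode stack slot allocated
	 above, since ldmxcsr/stmxcsr take only memory operands.  */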
14894 case IX86_BUILTIN_SHUFPS:
14895 case IX86_BUILTIN_SHUFPD:
14896 icode = (fcode == IX86_BUILTIN_SHUFPS
14897 ? CODE_FOR_sse_shufps
14898 : CODE_FOR_sse2_shufpd);
14899 arg0 = TREE_VALUE (arglist);
14900 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14901 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14902 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14903 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14904 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14905 tmode = insn_data[icode].operand[0].mode;
14906 mode0 = insn_data[icode].operand[1].mode;
14907 mode1 = insn_data[icode].operand[2].mode;
14908 mode2 = insn_data[icode].operand[3].mode;
14910 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14911 op0 = copy_to_mode_reg (mode0, op0);
14912 if ((optimize && !register_operand (op1, mode1))
14913 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
14914 op1 = copy_to_mode_reg (mode1, op1);
14915 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14917 /* @@@ better error message */
14918 error ("mask must be an immediate");
14919 return gen_reg_rtx (tmode);
14921 if (optimize || target == 0
14922 || GET_MODE (target) != tmode
14923 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14924 target = gen_reg_rtx (tmode);
14925 pat = GEN_FCN (icode) (target, op0, op1, op2);
14931 case IX86_BUILTIN_PSHUFW:
14932 case IX86_BUILTIN_PSHUFD:
14933 case IX86_BUILTIN_PSHUFHW:
14934 case IX86_BUILTIN_PSHUFLW:
14935 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
14936 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
14937 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
14938 : CODE_FOR_mmx_pshufw);
14939 arg0 = TREE_VALUE (arglist);
14940 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14941 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14942 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14943 tmode = insn_data[icode].operand[0].mode;
14944 mode1 = insn_data[icode].operand[1].mode;
14945 mode2 = insn_data[icode].operand[2].mode;
14947 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14948 op0 = copy_to_mode_reg (mode1, op0);
14949 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14951 /* @@@ better error message */
14952 error ("mask must be an immediate");
14956 || GET_MODE (target) != tmode
14957 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14958 target = gen_reg_rtx (tmode);
14959 pat = GEN_FCN (icode) (target, op0, op1);
14965 case IX86_BUILTIN_PSLLDQI128:
14966 case IX86_BUILTIN_PSRLDQI128:
14967 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
14968 : CODE_FOR_sse2_lshrti3);
14969 arg0 = TREE_VALUE (arglist);
14970 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14971 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14972 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14973 tmode = insn_data[icode].operand[0].mode;
14974 mode1 = insn_data[icode].operand[1].mode;
14975 mode2 = insn_data[icode].operand[2].mode;
14977 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14979 op0 = copy_to_reg (op0);
14980 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
14982 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14984 error ("shift must be an immediate");
14987 target = gen_reg_rtx (V2DImode);
14988 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
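      /* Illustrative sketch: the builtin takes its shift count in bits,
	 while pslldq/psrldq shift whole bytes; the usual emmintrin.h
	 wrapper hides this by scaling, roughly

	   #define _mm_slli_si128(A, N) \
	     ((__m128i) __builtin_ia32_pslldqi128 ((A), (N) * 8))

	 A non-immediate count is rejected with the error above.  */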
14994 case IX86_BUILTIN_FEMMS:
14995 emit_insn (gen_mmx_femms ());
14998 case IX86_BUILTIN_PAVGUSB:
14999 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
15001 case IX86_BUILTIN_PF2ID:
15002 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
15004 case IX86_BUILTIN_PFACC:
15005 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
15007 case IX86_BUILTIN_PFADD:
15008 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
15010 case IX86_BUILTIN_PFCMPEQ:
15011 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
15013 case IX86_BUILTIN_PFCMPGE:
15014 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
15016 case IX86_BUILTIN_PFCMPGT:
15017 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
15019 case IX86_BUILTIN_PFMAX:
15020 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
15022 case IX86_BUILTIN_PFMIN:
15023 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
15025 case IX86_BUILTIN_PFMUL:
15026 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
15028 case IX86_BUILTIN_PFRCP:
15029 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
15031 case IX86_BUILTIN_PFRCPIT1:
15032 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
15034 case IX86_BUILTIN_PFRCPIT2:
15035 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
15037 case IX86_BUILTIN_PFRSQIT1:
15038 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
15040 case IX86_BUILTIN_PFRSQRT:
15041 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
15043 case IX86_BUILTIN_PFSUB:
15044 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
15046 case IX86_BUILTIN_PFSUBR:
15047 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
15049 case IX86_BUILTIN_PI2FD:
15050 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
15052 case IX86_BUILTIN_PMULHRW:
15053 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
15055 case IX86_BUILTIN_PF2IW:
15056 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
15058 case IX86_BUILTIN_PFNACC:
15059 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
15061 case IX86_BUILTIN_PFPNACC:
15062 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
15064 case IX86_BUILTIN_PI2FW:
15065 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
15067 case IX86_BUILTIN_PSWAPDSI:
15068 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
15070 case IX86_BUILTIN_PSWAPDSF:
15071 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
15073 case IX86_BUILTIN_SQRTSD:
15074 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
15075 case IX86_BUILTIN_LOADUPD:
15076 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
15077 case IX86_BUILTIN_STOREUPD:
15078 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
15080 case IX86_BUILTIN_MFENCE:
15081 emit_insn (gen_sse2_mfence ());
15083 case IX86_BUILTIN_LFENCE:
15084 emit_insn (gen_sse2_lfence ());
15087 case IX86_BUILTIN_CLFLUSH:
15088 arg0 = TREE_VALUE (arglist);
15089 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15090 icode = CODE_FOR_sse2_clflush;
15091 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
15092 op0 = copy_to_mode_reg (Pmode, op0);
15094 emit_insn (gen_sse2_clflush (op0));
15097 case IX86_BUILTIN_MOVNTPD:
15098 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
15099 case IX86_BUILTIN_MOVNTDQ:
15100 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
15101 case IX86_BUILTIN_MOVNTI:
15102 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
15104 case IX86_BUILTIN_LOADDQU:
15105 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
15106 case IX86_BUILTIN_STOREDQU:
15107 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
15109 case IX86_BUILTIN_MONITOR:
15110 arg0 = TREE_VALUE (arglist);
15111 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15112 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15113 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15114 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15115 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15117 op0 = copy_to_mode_reg (SImode, op0);
15119 op1 = copy_to_mode_reg (SImode, op1);
15121 op2 = copy_to_mode_reg (SImode, op2);
15122 emit_insn (gen_sse3_monitor (op0, op1, op2));
15125 case IX86_BUILTIN_MWAIT:
15126 arg0 = TREE_VALUE (arglist);
15127 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15128 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15129 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15131 op0 = copy_to_mode_reg (SImode, op0);
15133 op1 = copy_to_mode_reg (SImode, op1);
15134 emit_insn (gen_sse3_mwait (op0, op1));
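      /* Illustrative sketch: assuming the usual pmmintrin.h wrappers,

	   _mm_monitor (addr, 0, 0);   ->  __builtin_ia32_monitor
	   _mm_mwait (0, 0);           ->  __builtin_ia32_mwait

	 the extension and hint arguments arrive as the SImode operands
	 copied into registers above.  */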
15137 case IX86_BUILTIN_LDDQU:
15138 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
15141 case IX86_BUILTIN_VEC_INIT_V2SI:
15142 case IX86_BUILTIN_VEC_INIT_V4HI:
15143 case IX86_BUILTIN_VEC_INIT_V8QI:
15144 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
15146 case IX86_BUILTIN_VEC_EXT_V2DF:
15147 case IX86_BUILTIN_VEC_EXT_V2DI:
15148 case IX86_BUILTIN_VEC_EXT_V4SF:
15149 case IX86_BUILTIN_VEC_EXT_V4SI:
15150 case IX86_BUILTIN_VEC_EXT_V8HI:
15151 case IX86_BUILTIN_VEC_EXT_V2SI:
15152 case IX86_BUILTIN_VEC_EXT_V4HI:
15153 return ix86_expand_vec_ext_builtin (arglist, target);
15155 case IX86_BUILTIN_VEC_SET_V8HI:
15156 case IX86_BUILTIN_VEC_SET_V4HI:
15157 return ix86_expand_vec_set_builtin (arglist);
15163 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15164 if (d->code == fcode)
15166 /* Compares are treated specially. */
15167 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
15168 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
15169 || d->icode == CODE_FOR_sse2_maskcmpv2df3
15170 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
15171 return ix86_expand_sse_compare (d, arglist, target);
15173 return ix86_expand_binop_builtin (d->icode, arglist, target);
15176 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15177 if (d->code == fcode)
15178 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
15180 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
15181 if (d->code == fcode)
15182 return ix86_expand_sse_comi (d, arglist, target);
15184 gcc_unreachable ();
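/* Illustrative sketch of the table-driven fallthrough above: a builtin
   such as __builtin_ia32_addps has no explicit case of its own, so it
   is found in bdesc_2arg (paired there with an insn code along the
   lines of CODE_FOR_addv4sf3) and expanded generically by
   ix86_expand_binop_builtin.  */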
15187 /* Store OPERAND to the memory after reload is completed. This means
15188 that we can't easily use assign_stack_local. */
15190 ix86_force_to_memory (enum machine_mode mode, rtx operand)
15194 gcc_assert (reload_completed);
15195 if (TARGET_RED_ZONE)
15197 result = gen_rtx_MEM (mode,
15198 gen_rtx_PLUS (Pmode,
15200 GEN_INT (-RED_ZONE_SIZE)));
15201 emit_move_insn (result, operand);
15203 else if (!TARGET_RED_ZONE && TARGET_64BIT)
15209 operand = gen_lowpart (DImode, operand);
15213 gen_rtx_SET (VOIDmode,
15214 gen_rtx_MEM (DImode,
15215 gen_rtx_PRE_DEC (DImode,
15216 stack_pointer_rtx)),
15220 gcc_unreachable ();
15222 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15231 split_di (&operand, 1, operands, operands + 1);
15233 gen_rtx_SET (VOIDmode,
15234 gen_rtx_MEM (SImode,
15235 gen_rtx_PRE_DEC (Pmode,
15236 stack_pointer_rtx)),
15239 gen_rtx_SET (VOIDmode,
15240 gen_rtx_MEM (SImode,
15241 gen_rtx_PRE_DEC (Pmode,
15242 stack_pointer_rtx)),
15247 /* It is better to store HImodes as SImodes. */
15248 if (!TARGET_PARTIAL_REG_STALL)
15249 operand = gen_lowpart (SImode, operand);
15253 gen_rtx_SET (VOIDmode,
15254 gen_rtx_MEM (GET_MODE (operand),
15255 gen_rtx_PRE_DEC (SImode,
15256 stack_pointer_rtx)),
15260 gcc_unreachable ();
15262 result = gen_rtx_MEM (mode, stack_pointer_rtx);
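/* Illustrative sketch of the three strategies above for a DImode
   operand (assembly approximate):

     64-bit with red zone:  movq %rax, -128(%rsp)
     64-bit, no red zone:   pushq %rax
     32-bit:                pushl <high half> ; pushl <low half>

   Only the red zone case leaves the stack pointer untouched; the
   others address the slot at the new top of stack.  */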
/* Free an operand previously forced to memory.  */
15269 ix86_free_from_memory (enum machine_mode mode)
15271 if (!TARGET_RED_ZONE)
15275 if (mode == DImode || TARGET_64BIT)
15277 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
  /* Use LEA to deallocate stack space.  In peephole2 it will be converted
     to a pop or add instruction if registers are available.  */
15283 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
15284 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
/* Put float CONST_DOUBLE in the constant pool instead of fp regs.
   QImode must go into class Q_REGS.
   Narrow ALL_REGS to GENERAL_REGS.  This allows movsf and movdf
   to do mem-to-mem moves through integer regs.  */
15294 ix86_preferred_reload_class (rtx x, enum reg_class class)
15296 /* We're only allowed to return a subclass of CLASS. Many of the
15297 following checks fail for NO_REGS, so eliminate that early. */
15298 if (class == NO_REGS)
15301 /* All classes can load zeros. */
15302 if (x == CONST0_RTX (GET_MODE (x)))
15305 /* Floating-point constants need more complex checks. */
15306 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
15308 /* General regs can load everything. */
15309 if (reg_class_subset_p (class, GENERAL_REGS))
15312 /* Floats can load 0 and 1 plus some others. Note that we eliminated
15313 zero above. We only want to wind up preferring 80387 registers if
15314 we plan on doing computation with them. */
15316 && (TARGET_MIX_SSE_I387
15317 || !(TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (x))))
15318 && standard_80387_constant_p (x))
15320 /* Limit class to non-sse. */
15321 if (class == FLOAT_SSE_REGS)
15323 if (class == FP_TOP_SSE_REGS)
15325 if (class == FP_SECOND_SSE_REGS)
15326 return FP_SECOND_REG;
15327 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
15333 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
15335 if (MAYBE_SSE_CLASS_P (class) && CONSTANT_P (x))
  /* Generally when we see PLUS here, it's the function invariant
     (plus soft-fp const_int), which can only be computed into general
     regs.  */
15341 if (GET_CODE (x) == PLUS)
15342 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
15344 /* QImode constants are easy to load, but non-constant QImode data
15345 must go into Q_REGS. */
15346 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
15348 if (reg_class_subset_p (class, Q_REGS))
15350 if (reg_class_subset_p (Q_REGS, class))
15358 /* If we are copying between general and FP registers, we need a memory
15359 location. The same is true for SSE and MMX registers.
   The macro can't work reliably when one of the CLASSES is a class
   containing registers from multiple units (SSE, MMX, integer).  We avoid
   this by never combining those units in a single alternative in the
   machine description.  Ensure that this constraint holds to avoid
   unexpected surprises.
15366 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
15367 enforce these sanity checks. */
15370 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
15371 enum machine_mode mode, int strict)
15373 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
15374 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
15375 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
15376 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
15377 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
15378 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
15380 gcc_assert (!strict);
15384 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
  /* ??? This is a lie.  We do have moves between mmx/general, and between
     mmx/sse2.  But by saying we need secondary memory we discourage the
     register allocator from using the mmx registers unless needed.  */
15390 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
15393 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
15395 /* SSE1 doesn't have any direct moves from other classes. */
15399 /* If the target says that inter-unit moves are more expensive
15400 than moving through memory, then don't generate them. */
15401 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
15404 /* Between SSE and general, we have moves no larger than word size. */
15405 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
15408 /* ??? For the cost of one register reformat penalty, we could use
15409 the same instructions to move SFmode and DFmode data, but the
15410 relevant move patterns don't support those alternatives. */
15411 if (mode == SFmode || mode == DFmode)
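/* Illustrative sketch: when the predicate above holds, a copy between
   units is staged through a stack slot, so with inter-unit moves
   disabled an SImode transfer from %xmm0 to %eax comes out roughly as

     movd %xmm0, (slot)
     movl (slot), %eax

   rather than a single movd between the register files.  */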
15418 /* Return true if the registers in CLASS cannot represent the change from
15419 modes FROM to TO. */
15422 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
15423 enum reg_class class)
15428 /* x87 registers can't do subreg at all, as all values are reformatted
15429 to extended precision. */
15430 if (MAYBE_FLOAT_CLASS_P (class))
15433 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
15435 /* Vector registers do not support QI or HImode loads. If we don't
15436 disallow a change to these modes, reload will assume it's ok to
15437 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
15438 the vec_dupv4hi pattern. */
15439 if (GET_MODE_SIZE (from) < 4)
15442 /* Vector registers do not support subreg with nonzero offsets, which
15443 are otherwise valid for integer registers. Since we can't see
15444 whether we have a nonzero offset from here, prohibit all
15445 nonparadoxical subregs changing size. */
15446 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
15453 /* Return the cost of moving data from a register in class CLASS1 to
15454 one in class CLASS2.
   It is not required that the cost always equal 2 when CLASS1 is the same
   as CLASS2; on some machines it is expensive to move between registers
   if they are not general registers.  */
15461 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
15462 enum reg_class class2)
  /* In case we require secondary memory, compute the cost of the store
     followed by a load.  In order to avoid bad register allocation choices,
     we need this to be *at least* as high as the symmetric
     MEMORY_MOVE_COST.  */
15468 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
15472 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
15473 MEMORY_MOVE_COST (mode, class1, 1));
15474 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
15475 MEMORY_MOVE_COST (mode, class2, 1));
  /* In the case of copying from a general purpose register we may emit
     multiple stores followed by a single load, causing a memory size
     mismatch stall.  Count this as an arbitrarily high cost of 20.  */
15480 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
15483 /* In the case of FP/MMX moves, the registers actually overlap, and we
15484 have to switch modes in order to treat them differently. */
15485 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
15486 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
  /* Moves between the SSE/MMX and integer units are expensive.  */
15493 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
15494 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
15495 return ix86_cost->mmxsse_to_integer;
15496 if (MAYBE_FLOAT_CLASS_P (class1))
15497 return ix86_cost->fp_move;
15498 if (MAYBE_SSE_CLASS_P (class1))
15499 return ix86_cost->sse_move;
15500 if (MAYBE_MMX_CLASS_P (class1))
15501 return ix86_cost->mmx_move;
15505 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
15508 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
  /* Only the flags registers can hold CCmode values, and they can hold
     nothing else.  */
15511 if (CC_REGNO_P (regno))
15512 return GET_MODE_CLASS (mode) == MODE_CC;
15513 if (GET_MODE_CLASS (mode) == MODE_CC
15514 || GET_MODE_CLASS (mode) == MODE_RANDOM
15515 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
15517 if (FP_REGNO_P (regno))
15518 return VALID_FP_MODE_P (mode);
15519 if (SSE_REGNO_P (regno))
      /* We implement the move patterns for all vector modes into and
	 out of SSE registers, even when no operation instructions
	 for them are available.  */
15524 return (VALID_SSE_REG_MODE (mode)
15525 || VALID_SSE2_REG_MODE (mode)
15526 || VALID_MMX_REG_MODE (mode)
15527 || VALID_MMX_REG_MODE_3DNOW (mode));
15529 if (MMX_REGNO_P (regno))
15531 /* We implement the move patterns for 3DNOW modes even in MMX mode,
15532 so if the register is available at all, then we can move data of
15533 the given mode into or out of it. */
15534 return (VALID_MMX_REG_MODE (mode)
15535 || VALID_MMX_REG_MODE_3DNOW (mode));
15538 if (mode == QImode)
15540 /* Take care for QImode values - they can be in non-QI regs,
15541 but then they do cause partial register stalls. */
15542 if (regno < 4 || TARGET_64BIT)
15544 if (!TARGET_PARTIAL_REG_STALL)
15546 return reload_in_progress || reload_completed;
  /* We handle both integers and floats in the general purpose registers.  */
15549 else if (VALID_INT_MODE_P (mode))
15551 else if (VALID_FP_MODE_P (mode))
15553 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
15554 on to use that value in smaller contexts, this can easily force a
15555 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
15556 supporting DImode, allow it. */
15557 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
15563 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
15564 tieable integer mode. */
15567 ix86_tieable_integer_mode_p (enum machine_mode mode)
15576 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
15579 return TARGET_64BIT;
15586 /* Return true if MODE1 is accessible in a register that can hold MODE2
15587 without copying. That is, all register classes that can hold MODE2
15588 can also hold MODE1. */
15591 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
15593 if (mode1 == mode2)
15596 if (ix86_tieable_integer_mode_p (mode1)
15597 && ix86_tieable_integer_mode_p (mode2))
15600 /* MODE2 being XFmode implies fp stack or general regs, which means we
15601 can tie any smaller floating point modes to it. Note that we do not
15602 tie this with TFmode. */
15603 if (mode2 == XFmode)
15604 return mode1 == SFmode || mode1 == DFmode;
15606 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
15607 that we can tie it with SFmode. */
15608 if (mode2 == DFmode)
15609 return mode1 == SFmode;
15611 /* If MODE2 is only appropriate for an SSE register, then tie with
15612 any other mode acceptable to SSE registers. */
15613 if (GET_MODE_SIZE (mode2) >= 8
15614 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
15615 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
15617 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
15618 with any other mode acceptable to MMX registers. */
15619 if (GET_MODE_SIZE (mode2) == 8
15620 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
15621 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
15626 /* Return the cost of moving data of mode M between a
15627 register and memory. A value of 2 is the default; this cost is
15628 relative to those in `REGISTER_MOVE_COST'.
   If moving between registers and memory is more expensive than between
   two registers, you should define this macro to express the relative
   cost.

   Also model the increased cost of moving QImode registers in the
   non Q_REGS classes.  */
15638 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
15640 if (FLOAT_CLASS_P (class))
15657 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
15659 if (SSE_CLASS_P (class))
15662 switch (GET_MODE_SIZE (mode))
15676 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
15678 if (MMX_CLASS_P (class))
15681 switch (GET_MODE_SIZE (mode))
15692 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
15694 switch (GET_MODE_SIZE (mode))
15698 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
15699 : ix86_cost->movzbl_load);
15701 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
15702 : ix86_cost->int_store[0] + 4);
15705 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
15707 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
15708 if (mode == TFmode)
15710 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
15711 * (((int) GET_MODE_SIZE (mode)
15712 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
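/* Worked example for the fallthrough above: on ia32, UNITS_PER_WORD is 4
   and XFmode is 12 bytes, so an XFmode (or TFmode) move costs
   (12 + 4 - 1) / 4 == 3 word moves times int_load[2] or int_store[2].  */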
15716 /* Compute a (partial) cost for rtx X. Return true if the complete
15717 cost has been computed, and false if subexpressions should be
15718 scanned. In either case, *TOTAL contains the cost result. */
15721 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
15723 enum machine_mode mode = GET_MODE (x);
15731 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
15733 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
15735 else if (flag_pic && SYMBOLIC_CONST (x)
               || (GET_CODE (x) != LABEL_REF
15738 && (GET_CODE (x) != SYMBOL_REF
15739 || !SYMBOL_REF_LOCAL_P (x)))))
15746 if (mode == VOIDmode)
15749 switch (standard_80387_constant_p (x))
15754 default: /* Other constants */
15759 /* Start with (MEM (SYMBOL_REF)), since that's where
15760 it'll probably end up. Add a penalty for size. */
15761 *total = (COSTS_N_INSNS (1)
15762 + (flag_pic != 0 && !TARGET_64BIT)
15763 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
      /* Zero extension is often completely free on x86_64, so make
	 it as cheap as possible.  */
      if (TARGET_64BIT && mode == DImode
15771 if (TARGET_64BIT && mode == DImode
15772 && GET_MODE (XEXP (x, 0)) == SImode)
15774 else if (TARGET_ZERO_EXTEND_WITH_AND)
15775 *total = COSTS_N_INSNS (ix86_cost->add);
15777 *total = COSTS_N_INSNS (ix86_cost->movzx);
15781 *total = COSTS_N_INSNS (ix86_cost->movsx);
15785 if (GET_CODE (XEXP (x, 1)) == CONST_INT
15786 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
15788 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
15791 *total = COSTS_N_INSNS (ix86_cost->add);
15794 if ((value == 2 || value == 3)
15795 && ix86_cost->lea <= ix86_cost->shift_const)
15797 *total = COSTS_N_INSNS (ix86_cost->lea);
15807 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
15809 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
15811 if (INTVAL (XEXP (x, 1)) > 32)
	      *total = COSTS_N_INSNS (ix86_cost->shift_const + 2);
	      *total = COSTS_N_INSNS (ix86_cost->shift_const * 2);
	    if (GET_CODE (XEXP (x, 1)) == AND)
	      *total = COSTS_N_INSNS (ix86_cost->shift_var * 2);
	      *total = COSTS_N_INSNS (ix86_cost->shift_var * 6 + 2);
15826 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
15827 *total = COSTS_N_INSNS (ix86_cost->shift_const);
15829 *total = COSTS_N_INSNS (ix86_cost->shift_var);
15834 if (FLOAT_MODE_P (mode))
15836 *total = COSTS_N_INSNS (ix86_cost->fmul);
15841 rtx op0 = XEXP (x, 0);
15842 rtx op1 = XEXP (x, 1);
15844 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
15846 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
15847 for (nbits = 0; value != 0; value &= value - 1)
15851 /* This is arbitrary. */
15854 /* Compute costs correctly for widening multiplication. */
	  if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
15856 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
15857 == GET_MODE_SIZE (mode))
15859 int is_mulwiden = 0;
15860 enum machine_mode inner_mode = GET_MODE (op0);
15862 if (GET_CODE (op0) == GET_CODE (op1))
15863 is_mulwiden = 1, op1 = XEXP (op1, 0);
15864 else if (GET_CODE (op1) == CONST_INT)
15866 if (GET_CODE (op0) == SIGN_EXTEND)
15867 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
15870 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
15874 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
15877 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
15878 + nbits * ix86_cost->mult_bit)
15879 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
15888 if (FLOAT_MODE_P (mode))
15889 *total = COSTS_N_INSNS (ix86_cost->fdiv);
15891 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
15895 if (FLOAT_MODE_P (mode))
15896 *total = COSTS_N_INSNS (ix86_cost->fadd);
15897 else if (GET_MODE_CLASS (mode) == MODE_INT
15898 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
15900 if (GET_CODE (XEXP (x, 0)) == PLUS
15901 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
15902 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
15903 && CONSTANT_P (XEXP (x, 1)))
15905 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
15906 if (val == 2 || val == 4 || val == 8)
15908 *total = COSTS_N_INSNS (ix86_cost->lea);
15909 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15910 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
15912 *total += rtx_cost (XEXP (x, 1), outer_code);
15916 else if (GET_CODE (XEXP (x, 0)) == MULT
15917 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
15919 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
15920 if (val == 2 || val == 4 || val == 8)
15922 *total = COSTS_N_INSNS (ix86_cost->lea);
15923 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15924 *total += rtx_cost (XEXP (x, 1), outer_code);
15928 else if (GET_CODE (XEXP (x, 0)) == PLUS)
15930 *total = COSTS_N_INSNS (ix86_cost->lea);
15931 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15932 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15933 *total += rtx_cost (XEXP (x, 1), outer_code);
15940 if (FLOAT_MODE_P (mode))
15942 *total = COSTS_N_INSNS (ix86_cost->fadd);
15950 if (!TARGET_64BIT && mode == DImode)
15952 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
15953 + (rtx_cost (XEXP (x, 0), outer_code)
15954 << (GET_MODE (XEXP (x, 0)) != DImode))
15955 + (rtx_cost (XEXP (x, 1), outer_code)
15956 << (GET_MODE (XEXP (x, 1)) != DImode)));
15962 if (FLOAT_MODE_P (mode))
15964 *total = COSTS_N_INSNS (ix86_cost->fchs);
15970 if (!TARGET_64BIT && mode == DImode)
15971 *total = COSTS_N_INSNS (ix86_cost->add * 2);
15973 *total = COSTS_N_INSNS (ix86_cost->add);
15977 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
15978 && XEXP (XEXP (x, 0), 1) == const1_rtx
15979 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
15980 && XEXP (x, 1) == const0_rtx)
15982 /* This kind of construct is implemented using test[bwl].
15983 Treat it as if we had an AND. */
15984 *total = (COSTS_N_INSNS (ix86_cost->add)
15985 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
15986 + rtx_cost (const1_rtx, outer_code));
15992 if (!TARGET_SSE_MATH
15994 || (mode == DFmode && !TARGET_SSE2))
15999 if (FLOAT_MODE_P (mode))
16000 *total = COSTS_N_INSNS (ix86_cost->fabs);
16004 if (FLOAT_MODE_P (mode))
16005 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
16009 if (XINT (x, 1) == UNSPEC_TP)
16020 static int current_machopic_label_num;
16022 /* Given a symbol name and its associated stub, write out the
16023 definition of the stub. */
16026 machopic_output_stub (FILE *file, const char *symb, const char *stub)
16028 unsigned int length;
16029 char *binder_name, *symbol_name, lazy_ptr_name[32];
16030 int label = ++current_machopic_label_num;
16032 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
16033 symb = (*targetm.strip_name_encoding) (symb);
16035 length = strlen (stub);
16036 binder_name = alloca (length + 32);
16037 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
16039 length = strlen (symb);
16040 symbol_name = alloca (length + 32);
16041 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
16043 sprintf (lazy_ptr_name, "L%d$lz", label);
16046 machopic_picsymbol_stub_section ();
16048 machopic_symbol_stub_section ();
16050 fprintf (file, "%s:\n", stub);
16051 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16055 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
16056 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
16057 fprintf (file, "\tjmp %%edx\n");
16060 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
16062 fprintf (file, "%s:\n", binder_name);
16066 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
16067 fprintf (file, "\tpushl %%eax\n");
16070 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
16072 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
16074 machopic_lazy_symbol_ptr_section ();
16075 fprintf (file, "%s:\n", lazy_ptr_name);
16076 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16077 fprintf (file, "\t.long %s\n", binder_name);
16079 #endif /* TARGET_MACHO */
/* Order the registers for the register allocator.  */
16084 x86_order_regs_for_local_alloc (void)
16089 /* First allocate the local general purpose registers. */
16090 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16091 if (GENERAL_REGNO_P (i) && call_used_regs[i])
16092 reg_alloc_order [pos++] = i;
16094 /* Global general purpose registers. */
16095 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16096 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
16097 reg_alloc_order [pos++] = i;
  /* x87 registers come first in case we are doing FP math
     using them.  */
16101 if (!TARGET_SSE_MATH)
16102 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16103 reg_alloc_order [pos++] = i;
16105 /* SSE registers. */
16106 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
16107 reg_alloc_order [pos++] = i;
16108 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
16109 reg_alloc_order [pos++] = i;
16111 /* x87 registers. */
16112 if (TARGET_SSE_MATH)
16113 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16114 reg_alloc_order [pos++] = i;
16116 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
16117 reg_alloc_order [pos++] = i;
  /* Initialize the rest of the array, as we do not allocate some registers
     at all.  */
16121 while (pos < FIRST_PSEUDO_REGISTER)
16122 reg_alloc_order [pos++] = 0;
16125 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
16126 struct attribute_spec.handler. */
16128 ix86_handle_struct_attribute (tree *node, tree name,
16129 tree args ATTRIBUTE_UNUSED,
16130 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
16133 if (DECL_P (*node))
16135 if (TREE_CODE (*node) == TYPE_DECL)
16136 type = &TREE_TYPE (*node);
16141 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
16142 || TREE_CODE (*type) == UNION_TYPE)))
16144 warning (OPT_Wattributes, "%qs attribute ignored",
16145 IDENTIFIER_POINTER (name));
16146 *no_add_attrs = true;
16149 else if ((is_attribute_p ("ms_struct", name)
16150 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
16151 || ((is_attribute_p ("gcc_struct", name)
16152 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
16154 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
16155 IDENTIFIER_POINTER (name));
16156 *no_add_attrs = true;
16163 ix86_ms_bitfield_layout_p (tree record_type)
  return ((TARGET_MS_BITFIELD_LAYOUT
	   && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
	  || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
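/* Illustrative sketch: the attribute machinery above lets a single type
   opt in or out, e.g.

     struct __attribute__ ((ms_struct)) S { int a : 3; char b : 5; };

   selects the MS bitfield layout for S, while -mms-bitfields flips the
   default and gcc_struct opts back out.  */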
16170 /* Returns an expression indicating where the this parameter is
16171 located on entry to the FUNCTION. */
16174 x86_this_parameter (tree function)
16176 tree type = TREE_TYPE (function);
16180 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
16181 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
16184 if (ix86_function_regparm (type, function) > 0)
16188 parm = TYPE_ARG_TYPES (type);
  /* Figure out whether or not the function has a variable number of
     arguments.  */
16191 for (; parm; parm = TREE_CHAIN (parm))
16192 if (TREE_VALUE (parm) == void_type_node)
16194 /* If not, the this parameter is in the first argument. */
16198 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
16200 return gen_rtx_REG (SImode, regno);
16204 if (aggregate_value_p (TREE_TYPE (type), type))
16205 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
16207 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
16210 /* Determine whether x86_output_mi_thunk can succeed. */
16213 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
16214 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
16215 HOST_WIDE_INT vcall_offset, tree function)
16217 /* 64-bit can handle anything. */
16221 /* For 32-bit, everything's fine if we have one free register. */
16222 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
16225 /* Need a free register for vcall_offset. */
16229 /* Need a free register for GOT references. */
16230 if (flag_pic && !(*targetm.binds_local_p) (function))
16233 /* Otherwise ok. */
16237 /* Output the assembler code for a thunk function. THUNK_DECL is the
16238 declaration for the thunk function itself, FUNCTION is the decl for
16239 the target function. DELTA is an immediate constant offset to be
16240 added to THIS. If VCALL_OFFSET is nonzero, the word at
16241 *(*this + vcall_offset) should be added to THIS. */
16244 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
16245 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
16246 HOST_WIDE_INT vcall_offset, tree function)
16249 rtx this = x86_this_parameter (function);
16252 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
16253 pull it in now and let DELTA benefit. */
16256 else if (vcall_offset)
16258 /* Put the this parameter into %eax. */
16260 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
16261 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16264 this_reg = NULL_RTX;
16266 /* Adjust the this parameter by a fixed constant. */
16269 xops[0] = GEN_INT (delta);
16270 xops[1] = this_reg ? this_reg : this;
16273 if (!x86_64_general_operand (xops[0], DImode))
16275 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16277 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
16281 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16284 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16287 /* Adjust the this parameter by a value stored in the vtable. */
16291 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16294 int tmp_regno = 2 /* ECX */;
16295 if (lookup_attribute ("fastcall",
16296 TYPE_ATTRIBUTES (TREE_TYPE (function))))
16297 tmp_regno = 0 /* EAX */;
16298 tmp = gen_rtx_REG (SImode, tmp_regno);
16301 xops[0] = gen_rtx_MEM (Pmode, this_reg);
16304 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16306 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16308 /* Adjust the this parameter. */
16309 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
16310 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
16312 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
16313 xops[0] = GEN_INT (vcall_offset);
16315 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16316 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
16318 xops[1] = this_reg;
16320 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16322 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16325 /* If necessary, drop THIS back to its stack slot. */
16326 if (this_reg && this_reg != this)
16328 xops[0] = this_reg;
16330 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16333 xops[0] = XEXP (DECL_RTL (function), 0);
16336 if (!flag_pic || (*targetm.binds_local_p) (function))
16337 output_asm_insn ("jmp\t%P0", xops);
16340 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
16341 tmp = gen_rtx_CONST (Pmode, tmp);
16342 tmp = gen_rtx_MEM (QImode, tmp);
16344 output_asm_insn ("jmp\t%A0", xops);
16349 if (!flag_pic || (*targetm.binds_local_p) (function))
16350 output_asm_insn ("jmp\t%P0", xops);
16355 rtx sym_ref = XEXP (DECL_RTL (function), 0);
16356 tmp = (gen_rtx_SYMBOL_REF
16358 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
16359 tmp = gen_rtx_MEM (QImode, tmp);
16361 output_asm_insn ("jmp\t%0", xops);
16364 #endif /* TARGET_MACHO */
16366 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
16367 output_set_got (tmp);
16370 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
16371 output_asm_insn ("jmp\t{*}%1", xops);
16377 x86_file_start (void)
16379 default_file_start ();
16380 if (X86_FILE_START_VERSION_DIRECTIVE)
16381 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
16382 if (X86_FILE_START_FLTUSED)
16383 fputs ("\t.global\t__fltused\n", asm_out_file);
16384 if (ix86_asm_dialect == ASM_INTEL)
16385 fputs ("\t.intel_syntax\n", asm_out_file);
16389 x86_field_alignment (tree field, int computed)
16391 enum machine_mode mode;
16392 tree type = TREE_TYPE (field);
16394 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
16396 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
16397 ? get_inner_array_type (type) : type);
16398 if (mode == DFmode || mode == DCmode
16399 || GET_MODE_CLASS (mode) == MODE_INT
16400 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
16401 return MIN (32, computed);
16405 /* Output assembler code to FILE to increment profiler label # LABELNO
16406 for profiling a function entry. */
16408 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
16413 #ifndef NO_PROFILE_COUNTERS
16414 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
16416 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
16420 #ifndef NO_PROFILE_COUNTERS
16421 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
16423 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
16427 #ifndef NO_PROFILE_COUNTERS
16428 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
16429 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
16431 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
16435 #ifndef NO_PROFILE_COUNTERS
16436 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
16437 PROFILE_COUNT_REGISTER);
16439 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
/* We don't have exact information about the insn sizes, but we may assume
   quite safely that we are informed about all 1 byte insns and memory
   address sizes.  This is enough to eliminate unnecessary padding in
   99% of cases.  */
16449 min_insn_size (rtx insn)
16453 if (!INSN_P (insn) || !active_insn_p (insn))
  /* Discard alignments we've emitted, and jump instructions.  */
16457 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
16458 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
16460 if (GET_CODE (insn) == JUMP_INSN
16461 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
16462 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
  /* Important case - calls are always 5 bytes.
     It is common to have many calls in a row.  */
16467 if (GET_CODE (insn) == CALL_INSN
16468 && symbolic_reference_mentioned_p (PATTERN (insn))
16469 && !SIBLING_CALL_P (insn))
16471 if (get_attr_length (insn) <= 1)
  /* For normal instructions we may rely on the sizes of addresses
     and the presence of a symbol to require 4 bytes of encoding.
     This is not the case for jumps, where references are PC relative.  */
16477 if (GET_CODE (insn) != JUMP_INSN)
16479 l = get_attr_length_address (insn);
16480 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
/* AMD K8 core mispredicts jumps when there are more than 3 jumps in a
   16 byte window.  */
16493 ix86_avoid_jump_misspredicts (void)
16495 rtx insn, start = get_insns ();
16496 int nbytes = 0, njumps = 0;
16499 /* Look for all minimal intervals of instructions containing 4 jumps.
16500 The intervals are bounded by START and INSN. NBYTES is the total
16501 size of instructions in the interval including INSN and not including
     START.  When NBYTES is smaller than 16 bytes, it is possible that the
     end of START and INSN end up in the same 16 byte page.

     The smallest page offset INSN can start at is the case where START
     ends at offset 0.  The offset of INSN is then NBYTES - sizeof (INSN).
     We add a p2align to the 16 byte window with maxskip
     17 - NBYTES + sizeof (INSN).  */
16509 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
16512 nbytes += min_insn_size (insn);
	fprintf (dump_file, "Insn %i estimated to %i bytes\n",
		 INSN_UID (insn), min_insn_size (insn));
16516 if ((GET_CODE (insn) == JUMP_INSN
16517 && GET_CODE (PATTERN (insn)) != ADDR_VEC
16518 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
16519 || GET_CODE (insn) == CALL_INSN)
16526 start = NEXT_INSN (start);
16527 if ((GET_CODE (start) == JUMP_INSN
16528 && GET_CODE (PATTERN (start)) != ADDR_VEC
16529 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
16530 || GET_CODE (start) == CALL_INSN)
16531 njumps--, isjump = 1;
16534 nbytes -= min_insn_size (start);
16536 gcc_assert (njumps >= 0);
16538 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
16539 INSN_UID (start), INSN_UID (insn), nbytes);
16541 if (njumps == 3 && isjump && nbytes < 16)
16543 int padsize = 15 - nbytes + min_insn_size (insn);
16546 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
16547 INSN_UID (insn), padsize);
16548 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
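	  /* Worked example: if the window holding the four jumps spans
	     nbytes == 12 and INSN itself is 2 bytes, we emit an align of
	     15 - 12 + 2 == 5 bytes, enough to push INSN out of any 16 byte
	     page shared with the three earlier jumps.  */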
/* AMD Athlon works faster
   when RET is not the destination of a conditional jump or directly
   preceded by another jump instruction.  We avoid the penalty by inserting
   a NOP just before the RET instructions in such cases.  */
16558 ix86_pad_returns (void)
16563 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
16565 basic_block bb = e->src;
16566 rtx ret = BB_END (bb);
16568 bool replace = false;
16570 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
16571 || !maybe_hot_bb_p (bb))
16573 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
16574 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
16576 if (prev && GET_CODE (prev) == CODE_LABEL)
16581 FOR_EACH_EDGE (e, ei, bb->preds)
16582 if (EDGE_FREQUENCY (e) && e->src->index >= 0
16583 && !(e->flags & EDGE_FALLTHRU))
16588 prev = prev_active_insn (ret);
16590 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
16591 || GET_CODE (prev) == CALL_INSN))
      /* Empty functions get a branch mispredict even when the jump
	 destination is not visible to us.  */
16595 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
16600 emit_insn_before (gen_return_internal_long (), ret);
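      /* Illustrative sketch: a return reached directly by a branch,

	   jne .L1
	   ret

	 is rewritten to use return_internal_long, which typically
	 assembles as "rep ; ret", avoiding the K8 mispredict penalty.  */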
/* Implement machine specific optimizations.  We implement padding of returns
   for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window.  */
16611 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
16612 ix86_pad_returns ();
16613 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
16614 ix86_avoid_jump_misspredicts ();
/* Return nonzero when a QImode register that must be represented via a
   REX prefix is used.  */
16620 x86_extended_QIreg_mentioned_p (rtx insn)
16623 extract_insn_cached (insn);
16624 for (i = 0; i < recog_data.n_operands; i++)
16625 if (REG_P (recog_data.operand[i])
16626 && REGNO (recog_data.operand[i]) >= 4)
/* Return nonzero when P points to a register encoded via a REX prefix.
   Called via for_each_rtx.  */
static int
extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno;
  if (!REG_P (*p))
    return 0;
  regno = REGNO (*p);
  return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
}

/* Return true when INSN mentions a register that must be encoded using
   a REX prefix.  */
bool
x86_extended_reg_mentioned_p (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
}
/* Generate an unsigned DImode/SImode to FP conversion.  This is the same
   code optabs would emit if we didn't have TFmode patterns.  */
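/* Sketch of the expansion below (illustrative only), for a DImode
   input X and a DFmode result:

	if ((signed) X >= 0)
	  return (double) (signed) X;
	i0 = (X >> 1) | (X & 1);	-- halve, rounding to odd
	f0 = (double) (signed) i0;
	return f0 + f0;

   OR-ing the shifted-out bit into I0 makes the final F0 + F0 round
   exactly as a direct unsigned conversion would.  */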
void
x86_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode, inmode;

  inmode = GET_MODE (operands[1]);
  gcc_assert (inmode == SImode || inmode == DImode);

  out = operands[0];
  in = force_reg (inmode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i1 = gen_reg_rtx (Pmode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
  i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
  i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
  expand_float (f0, i0, 0);
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   with all elements equal to VAL.  Return true if successful.  */
static bool
ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
                                   rtx target, rtx val)
{
  enum machine_mode smode, wsmode, wvmode;
  rtx x;

  switch (mode)
    {
    case V2SImode:
    case V2SFmode:
      if (!mmx_ok && !TARGET_SSE)
        return false;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
    case V4SFmode:
    case V4SImode:
      val = force_reg (GET_MODE_INNER (mode), val);
      x = gen_rtx_VEC_DUPLICATE (mode, val);
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
      return true;

    case V4HImode:
      if (!mmx_ok)
        return false;
      if (TARGET_SSE || TARGET_3DNOW_A)
        {
          val = gen_lowpart (SImode, val);
          x = gen_rtx_TRUNCATE (HImode, val);
          x = gen_rtx_VEC_DUPLICATE (mode, x);
          emit_insn (gen_rtx_SET (VOIDmode, target, x));
          return true;
        }
      smode = HImode, wsmode = SImode, wvmode = V2SImode;
      goto widen;

    case V8QImode:
      if (!mmx_ok)
        return false;
      smode = QImode, wsmode = HImode, wvmode = V4HImode;
      goto widen;
    case V8HImode:
      smode = HImode, wsmode = SImode, wvmode = V4SImode;
      goto widen;
    case V16QImode:
      smode = QImode, wsmode = HImode, wvmode = V8HImode;
      goto widen;

    widen:
      /* Replicate the value once into the next wider mode and recurse.  */
      val = convert_modes (wsmode, smode, val, true);
      x = expand_simple_binop (wsmode, ASHIFT, val,
                               GEN_INT (GET_MODE_BITSIZE (smode)),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);

      x = gen_reg_rtx (wvmode);
      if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
        gcc_unreachable ();
      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }
}
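/* Worked example for the "widen" path (illustrative): to broadcast a
   QImode value B into V8QImode, first form the HImode value
   (B << 8) | B, then recurse to duplicate that into V4HImode; the
   resulting bit pattern, viewed through gen_lowpart, is the desired
   V8QImode vector.  */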
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   whose low element is VAR, and the other elements are zero.  Return
   true if successful.  */
static bool
ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode,
                                     rtx target, rtx var)
{
  enum machine_mode vsimode;
  rtx x;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (!mmx_ok && !TARGET_SSE)
        return false;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      var = force_reg (GET_MODE_INNER (mode), var);
      x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
      return true;

    case V4SFmode:
    case V4SImode:
      var = force_reg (GET_MODE_INNER (mode), var);
      x = gen_rtx_VEC_DUPLICATE (mode, var);
      x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
      return true;

    case V8HImode:
    case V16QImode:
      vsimode = V4SImode;
      goto widen;
    case V4HImode:
    case V8QImode:
      if (!mmx_ok)
        return false;
      vsimode = V2SImode;
      goto widen;

    widen:
      /* Zero extend the variable element to SImode and recurse.  */
      var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);

      x = gen_reg_rtx (vsimode);
      if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var))
        gcc_unreachable ();

      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }
}
/* A subroutine of ix86_expand_vector_init.  Store into TARGET a vector
   consisting of the values in VALS.  It is known that all elements
   except ONE_VAR are constants.  Return true if successful.  */
static bool
ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
                                 rtx target, rtx vals, int one_var)
{
  rtx var = XVECEXP (vals, 0, one_var);
  enum machine_mode wmode;
  rtx const_vec, x;

  XVECEXP (vals, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
  const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));

  switch (mode)
    {
    case V2DFmode:
    case V2DImode:
    case V2SFmode:
    case V2SImode:
      /* For the two element vectors, it's just as easy to use
         the general case.  */
      return false;

    case V4SFmode:
    case V4SImode:
    case V8HImode:
    case V4HImode:
      break;

    case V16QImode:
      wmode = V8HImode;
      goto widen;
    case V8QImode:
      wmode = V4HImode;
      goto widen;
    widen:
      /* There's no way to set one QImode entry easily.  Combine
         the variable value with its adjacent constant value, and
         promote to an HImode set.  */
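      /* Worked example (illustrative): for V16QImode with ONE_VAR == 5,
         X below is constant element 4; VAR is shifted into the high
         byte, combined as (VAR << 8) | (X & 0xff), and stored as
         element 5 >> 1 == 2 of the V8HImode image of the vector.  */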
      x = XVECEXP (vals, 0, one_var ^ 1);
      if (one_var & 1)
        {
          var = convert_modes (HImode, QImode, var, true);
          var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
                                     NULL_RTX, 1, OPTAB_LIB_WIDEN);
          x = GEN_INT (INTVAL (x) & 0xff);
        }
      else
        {
          var = convert_modes (HImode, QImode, var, true);
          x = gen_int_mode (INTVAL (x) << 8, HImode);
        }
      if (x != const0_rtx)
        var = expand_simple_binop (HImode, IOR, var, x, var,
                                   1, OPTAB_LIB_WIDEN);

      x = gen_reg_rtx (wmode);
      emit_move_insn (x, gen_lowpart (wmode, const_vec));
      ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);

      emit_move_insn (target, gen_lowpart (mode, x));
      return true;

    default:
      return false;
    }

  emit_move_insn (target, const_vec);
  ix86_expand_vector_set (mmx_ok, target, var, one_var);
  return true;
}
/* A subroutine of ix86_expand_vector_init.  Handle the most general case:
   all values variable, and none identical.  */
static void
ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
                                 rtx target, rtx vals)
{
  enum machine_mode half_mode = GET_MODE_INNER (mode);
  rtx op0 = NULL, op1 = NULL;
  bool use_vec_concat = false;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (!mmx_ok && !TARGET_SSE)
        break;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      /* For the two element vectors, we always implement VEC_CONCAT.  */
      op0 = XVECEXP (vals, 0, 0);
      op1 = XVECEXP (vals, 0, 1);
      use_vec_concat = true;
      break;

    case V4SFmode:
      half_mode = V2SFmode;
      goto half;
    case V4SImode:
      half_mode = V2SImode;
      goto half;
    half:
      {
        rtx v;

        /* For V4SF and V4SI, we implement a concat of two V2 vectors.
           Recurse to load the two halves.  */

        op0 = gen_reg_rtx (half_mode);
        v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
        ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));

        op1 = gen_reg_rtx (half_mode);
        v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
        ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));

        use_vec_concat = true;
      }
      break;

    case V8HImode:
    case V16QImode:
    case V4HImode:
    case V8QImode:
      break;

    default:
      gcc_unreachable ();
    }

  if (use_vec_concat)
    {
      if (!register_operand (op0, half_mode))
        op0 = force_reg (half_mode, op0);
      if (!register_operand (op1, half_mode))
        op1 = force_reg (half_mode, op1);

      emit_insn (gen_rtx_SET (VOIDmode, target,
                              gen_rtx_VEC_CONCAT (mode, op0, op1)));
    }
  else
    {
      int i, j, n_elts, n_words, n_elt_per_word;
      enum machine_mode inner_mode;
      rtx words[4], shift;

      inner_mode = GET_MODE_INNER (mode);
      n_elts = GET_MODE_NUNITS (mode);
      n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      n_elt_per_word = n_elts / n_words;
      shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));

      for (i = 0; i < n_words; ++i)
        {
          rtx word = NULL_RTX;

          for (j = 0; j < n_elt_per_word; ++j)
            {
              rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
              elt = convert_modes (word_mode, inner_mode, elt, true);

              if (j == 0)
                word = elt;
              else
                {
                  word = expand_simple_binop (word_mode, ASHIFT, word, shift,
                                              word, 1, OPTAB_LIB_WIDEN);
                  word = expand_simple_binop (word_mode, IOR, word, elt,
                                              word, 1, OPTAB_LIB_WIDEN);
                }
            }

          words[i] = word;
        }
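      /* Worked example (illustrative): for V8QImode on a 32-bit target,
         n_words == 2 and n_elt_per_word == 4, so word 0 is assembled as
         (((e3 << 8 | e2) << 8 | e1) << 8) | e0, leaving element 0 in
         the least significant byte, as the little-endian layout
         requires.  */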
      if (n_words == 1)
        emit_move_insn (target, gen_lowpart (mode, words[0]));
      else if (n_words == 2)
        {
          rtx tmp = gen_reg_rtx (mode);
          emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
          emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
          emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
          emit_move_insn (target, tmp);
        }
      else if (n_words == 4)
        {
          rtx tmp = gen_reg_rtx (V4SImode);
          vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
          ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
          emit_move_insn (target, gen_lowpart (mode, tmp));
        }
      else
        gcc_unreachable ();
    }
}
/* Initialize vector TARGET via VALS.  Suppress the use of MMX
   instructions unless MMX_OK is true.  */
void
ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  int i;
  rtx x;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
        n_var++, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
        all_const_zero = false;
      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;
    }

  /* Constants are best loaded from the constant pool.  */
  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  /* If all values are identical, broadcast the value.  */
  if (all_same
      && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
                                            XVECEXP (vals, 0, 0)))
    return;

  /* Values where only one field is non-constant are best loaded from
     the pool and overwritten via move later.  */
  if (n_var == 1)
    {
      if (all_const_zero && one_var == 0
          && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
                                                  XVECEXP (vals, 0, 0)))
        return;

      if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals,
                                           one_var))
        return;
    }

  ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
}
/* Store VAL into element ELT of vector TARGET.  Suppress the use of MMX
   instructions unless MMX_OK is true.  */
void
ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  bool use_vec_merge = false;
  rtx tmp;

  switch (mode)
    {
    case V2SFmode:
    case V2SImode:
      if (mmx_ok)
        {
          tmp = gen_reg_rtx (GET_MODE_INNER (mode));
          ix86_expand_vector_extract (true, tmp, target, 1 - elt);
          if (elt == 1)
            tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
          else
            tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
          emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
          return;
        }
      break;

    case V2DFmode:
    case V2DImode:
      {
        rtx op0, op1;

        /* For the two element vectors, we implement a VEC_CONCAT with
           the extraction of the other element.  */

        tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
        tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);

        if (elt == 0)
          op0 = val, op1 = tmp;
        else
          op0 = tmp, op1 = val;

        tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
        emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
      }
      return;

    case V4SFmode:
      switch (elt)
        {
        case 0:
          use_vec_merge = true;
          break;

        case 1:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* target = A A B B */
          emit_insn (gen_sse_unpcklps (target, target, target));
          /* target = X A B B */
          ix86_expand_vector_set (false, target, val, 0);
          /* target = A X C D */
          emit_insn (gen_sse_shufps_1 (target, target, tmp,
                                       GEN_INT (1), GEN_INT (0),
                                       GEN_INT (2+4), GEN_INT (3+4)));
          return;

        case 2:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* tmp = X B C D */
          ix86_expand_vector_set (false, tmp, val, 0);
          /* target = A B X D */
          emit_insn (gen_sse_shufps_1 (target, target, tmp,
                                       GEN_INT (0), GEN_INT (1),
                                       GEN_INT (0+4), GEN_INT (3+4)));
          return;

        case 3:
          /* tmp = target = A B C D */
          tmp = copy_to_reg (target);
          /* tmp = X B C D */
          ix86_expand_vector_set (false, tmp, val, 0);
          /* target = A B C X */
          emit_insn (gen_sse_shufps_1 (target, target, tmp,
                                       GEN_INT (0), GEN_INT (1),
                                       GEN_INT (2+4), GEN_INT (0+4)));
          return;

        default:
          gcc_unreachable ();
        }
      break;

    case V4SImode:
      /* Element 0 handled by vec_merge below.  */
      if (elt == 0)
        {
          use_vec_merge = true;
          break;
        }

      if (TARGET_SSE2)
        {
          /* With SSE2, use integer shuffles to swap element 0 and ELT,
             store into element 0, then shuffle them back.  */
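          /* Worked example (illustrative): for ELT == 2 the order built
             below is { 2, 1, 0, 3 }, i.e. a pshufd that swaps lanes 0
             and 2; after VAL is stored into lane 0, the same shuffle
             swaps the two lanes back into place.  */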
          rtx order[4];

          order[0] = GEN_INT (elt);
          order[1] = const1_rtx;
          order[2] = const2_rtx;
          order[3] = GEN_INT (3);
          order[elt] = const0_rtx;

          emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
                                        order[1], order[2], order[3]));

          ix86_expand_vector_set (false, target, val, 0);

          emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
                                        order[1], order[2], order[3]));
        }
      else
        {
          /* For SSE1, we have to reuse the V4SF code.  */
          ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
                                  gen_lowpart (SFmode, val), elt);
        }
      return;

    case V8HImode:
      use_vec_merge = TARGET_SSE2;
      break;
    case V4HImode:
      use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    case V16QImode:
    case V8QImode:
    default:
      break;
    }

  if (use_vec_merge)
    {
      tmp = gen_rtx_VEC_DUPLICATE (mode, val);
      tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);

      emit_move_insn (mem, target);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (tmp, val);

      emit_move_insn (target, mem);
    }
}
/* Extract element ELT of vector VEC into TARGET.  Suppress the use of
   MMX instructions unless MMX_OK is true.  */
void
ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
{
  enum machine_mode mode = GET_MODE (vec);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  bool use_vec_extr = false;
  rtx tmp;

  switch (mode)
    {
    case V2SImode:
    case V2SFmode:
      if (!mmx_ok)
        break;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      use_vec_extr = true;
      break;

    case V4SFmode:
      switch (elt)
        {
        case 0:
          tmp = vec;
          break;

        case 1:
        case 3:
          tmp = gen_reg_rtx (mode);
          emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
                                       GEN_INT (elt), GEN_INT (elt),
                                       GEN_INT (elt+4), GEN_INT (elt+4)));
          break;

        case 2:
          tmp = gen_reg_rtx (mode);
          emit_insn (gen_sse_unpckhps (tmp, vec, vec));
          break;

        default:
          gcc_unreachable ();
        }
      vec = tmp;
      use_vec_extr = true;
      elt = 0;
      break;

    case V4SImode:
      if (TARGET_SSE2)
        {
          switch (elt)
            {
            case 0:
              tmp = vec;
              break;

            case 1:
            case 3:
              tmp = gen_reg_rtx (mode);
              emit_insn (gen_sse2_pshufd_1 (tmp, vec,
                                            GEN_INT (elt), GEN_INT (elt),
                                            GEN_INT (elt), GEN_INT (elt)));
              break;

            case 2:
              tmp = gen_reg_rtx (mode);
              emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
              break;

            default:
              gcc_unreachable ();
            }
          vec = tmp;
          use_vec_extr = true;
          elt = 0;
        }
      else
        {
          /* For SSE1, we have to reuse the V4SF code.  */
          ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
                                      gen_lowpart (V4SFmode, vec), elt);
          return;
        }
      break;

    case V8HImode:
      use_vec_extr = TARGET_SSE2;
      break;
    case V4HImode:
      use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    case V16QImode:
    case V8QImode:
      /* ??? Could extract the appropriate HImode element and shift.  */
    default:
      break;
    }

  if (use_vec_extr)
    {
      tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
      tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);

      /* Let the rtl optimizers know about the zero extension performed.  */
      if (inner_mode == HImode)
        {
          tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
          target = gen_lowpart (SImode, target);
        }

      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);

      emit_move_insn (mem, vec);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (target, tmp);
    }
}
/* Expand a vector reduction on V4SFmode for SSE1.  FN is the binary
   pattern to reduce; DEST is the destination; IN is the input vector.  */
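/* Lane-by-lane sketch (illustrative), for a commutative FN and
   IN = { a, b, c, d }: movhlps makes TMP1 = { c, d, c, d }; FN gives
   TMP2 = { f(a,c), f(b,d), ... }; the shufps broadcasts lane 1, so
   TMP3 = { f(b,d), ... }; and the final FN leaves the full reduction
   f(f(a,c), f(b,d)) in lane 0 of DEST.  */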
void
ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
{
  rtx tmp1, tmp2, tmp3;

  tmp1 = gen_reg_rtx (V4SFmode);
  tmp2 = gen_reg_rtx (V4SFmode);
  tmp3 = gen_reg_rtx (V4SFmode);

  emit_insn (gen_sse_movhlps (tmp1, in, in));
  emit_insn (fn (tmp2, tmp1, in));

  emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
                               GEN_INT (1), GEN_INT (1),
                               GEN_INT (1+4), GEN_INT (1+4)));
  emit_insn (fn (dest, tmp2, tmp3));
}
/* Implements target hook vector_mode_supported_p.  */
static bool
ix86_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
    return true;
  if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
    return true;
  if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
    return true;
  if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
    return true;
  return false;
}
/* Worker function for TARGET_MD_ASM_CLOBBERS.

   We do this in the new i386 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static tree
ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
                      tree inputs ATTRIBUTE_UNUSED,
                      tree clobbers)
{
  clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
                        clobbers);
  clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
                        clobbers);
  clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
                        clobbers);
  return clobbers;
}
/* Worker function for REVERSE_CONDITION.  */

enum rtx_code
ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
{
  return (mode != CCFPmode && mode != CCFPUmode
          ? reverse_condition (code)
          : reverse_condition_maybe_unordered (code));
}
/* Output code to perform an x87 FP register move, from OPERANDS[1]
   to OPERANDS[0].  */

const char *
output_387_reg_move (rtx insn, rtx *operands)
{
  if (REG_P (operands[1])
      && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
    {
      if (REGNO (operands[0]) == FIRST_STACK_REG
          && TARGET_USE_FFREEP)
        return "ffreep\t%y0";
      return "fstp\t%y0";
    }

  if (STACK_TOP_P (operands[0]))
    return "fld%z1\t%y1";
  return "fst\t%y0";
}
/* Output code to perform a conditional jump to LABEL, if the C2 flag in
   the FP status register is set.  */
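/* Background for the two branches below: FNSTSW copies C0, C2 and C3
   into bits 0, 2 and 6 of %ah; SAHF then maps those to CF, PF and ZF,
   so the C2 (unordered) case becomes a PF test.  Without SAHF we test
   the 0x04 bit of %ah directly.  */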
void
ix86_emit_fp_unordered_jump (rtx label)
{
  rtx reg = gen_reg_rtx (HImode);
  rtx temp;

  emit_insn (gen_x86_fnstsw_1 (reg));

  if (TARGET_USE_SAHF)
    {
      emit_insn (gen_x86_sahf_1 (reg));

      temp = gen_rtx_REG (CCmode, FLAGS_REG);
      temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
    }
  else
    {
      emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));

      temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
    }

  temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
                               gen_rtx_LABEL_REF (VOIDmode, label),
                               pc_rtx);
  temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
  emit_jump_insn (temp);
}
/* Output code to perform a log1p XFmode calculation.  */
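/* Math note: log1p(x) = ln(2) * log2(1 + x).  fyl2xp1 is accurate only
   for |x| < 1 - sqrt(2)/2, which is the 0.29289... constant compared
   against below; larger magnitudes take the fyl2x path on an
   explicitly formed 1 + x.  */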
void ix86_emit_i387_log1p (rtx op0, rtx op1)
{
  rtx label1 = gen_label_rtx ();
  rtx label2 = gen_label_rtx ();

  rtx tmp = gen_reg_rtx (XFmode);
  rtx tmp2 = gen_reg_rtx (XFmode);

  emit_insn (gen_absxf2 (tmp, op1));
  emit_insn (gen_cmpxf (tmp,
    CONST_DOUBLE_FROM_REAL_VALUE (
       REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
       XFmode)));
  emit_jump_insn (gen_bge (label1));

  emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
  emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
  emit_jump (label2);

  emit_label (label1);
  emit_move_insn (tmp, CONST1_RTX (XFmode));
  emit_insn (gen_addxf3 (tmp, op1, tmp));
  emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
  emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));

  emit_label (label2);
}
/* Solaris named-section hook.  Parameters are as for
   named_section_real.  */

static void
i386_solaris_elf_named_section (const char *name, unsigned int flags,
                                tree decl)
{
  /* With Binutils 2.15, the "@unwind" marker must be specified on
     every occurrence of the ".eh_frame" section, not just the first
     one.  */
  if (TARGET_64BIT
      && strcmp (name, ".eh_frame") == 0)
    {
      fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
               flags & SECTION_WRITE ? "aw" : "a");
      return;
    }
  default_elf_asm_named_section (name, flags, decl);
}

#include "gt-i386.h"