/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "tree-gimple.h"
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
/* Processor costs (relative to an add) */
static const
struct processor_costs size_cost = {  /* costs for tuning for size */
  2,                /* cost of an add instruction */
  3,                /* cost of a lea instruction */
  2,                /* variable shift costs */
  3,                /* constant shift costs */
  {3, 3, 3, 3, 5},  /* cost of starting a multiply */
  0,                /* cost of multiply per each bit set */
  {3, 3, 3, 3, 5},  /* cost of a divide/mod */
  3,                /* cost of movsx */
  3,                /* cost of movzx */
  2,                /* cost for loading QImode using movzbl */
  {2, 2, 2},        /* cost of loading integer registers
                       in QImode, HImode and SImode.
                       Relative to reg-reg move (2).  */
  {2, 2, 2},        /* cost of storing integer registers */
  2,                /* cost of reg,reg fld/fst */
  {2, 2, 2},        /* cost of loading fp registers
                       in SFmode, DFmode and XFmode */
  {2, 2, 2},        /* cost of storing fp registers
                       in SFmode, DFmode and XFmode */
  3,                /* cost of moving MMX register */
  {3, 3},           /* cost of loading MMX registers
                       in SImode and DImode */
  {3, 3},           /* cost of storing MMX registers
                       in SImode and DImode */
  3,                /* cost of moving SSE register */
  {3, 3, 3},        /* cost of loading SSE registers
                       in SImode, DImode and TImode */
  {3, 3, 3},        /* cost of storing SSE registers
                       in SImode, DImode and TImode */
  3,                /* MMX or SSE register to integer */
  0,                /* size of prefetch block */
  0,                /* number of parallel prefetches */
  2,                /* cost of FADD and FSUB insns.  */
  2,                /* cost of FMUL instruction.  */
  2,                /* cost of FDIV instruction.  */
  2,                /* cost of FABS instruction.  */
  2,                /* cost of FCHS instruction.  */
  2,                /* cost of FSQRT instruction.  */
};
/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {  /* 386 specific costs */
  1,                /* cost of an add instruction */
  1,                /* cost of a lea instruction */
  3,                /* variable shift costs */
  2,                /* constant shift costs */
  {6, 6, 6, 6, 6},  /* cost of starting a multiply */
  1,                /* cost of multiply per each bit set */
  {23, 23, 23, 23, 23},  /* cost of a divide/mod */
  3,                /* cost of movsx */
  2,                /* cost of movzx */
  15,               /* "large" insn */
  4,                /* cost for loading QImode using movzbl */
  {2, 4, 2},        /* cost of loading integer registers
                       in QImode, HImode and SImode.
                       Relative to reg-reg move (2).  */
  {2, 4, 2},        /* cost of storing integer registers */
  2,                /* cost of reg,reg fld/fst */
  {8, 8, 8},        /* cost of loading fp registers
                       in SFmode, DFmode and XFmode */
  {8, 8, 8},        /* cost of storing fp registers
                       in SFmode, DFmode and XFmode */
  2,                /* cost of moving MMX register */
  {4, 8},           /* cost of loading MMX registers
                       in SImode and DImode */
  {4, 8},           /* cost of storing MMX registers
                       in SImode and DImode */
  2,                /* cost of moving SSE register */
  {4, 8, 16},       /* cost of loading SSE registers
                       in SImode, DImode and TImode */
  {4, 8, 16},       /* cost of storing SSE registers
                       in SImode, DImode and TImode */
  3,                /* MMX or SSE register to integer */
  0,                /* size of prefetch block */
  0,                /* number of parallel prefetches */
  23,               /* cost of FADD and FSUB insns.  */
  27,               /* cost of FMUL instruction.  */
  88,               /* cost of FDIV instruction.  */
  22,               /* cost of FABS instruction.  */
  24,               /* cost of FCHS instruction.  */
  122,              /* cost of FSQRT instruction.  */
};
static const
struct processor_costs i486_cost = {  /* 486 specific costs */
  1,                /* cost of an add instruction */
  1,                /* cost of a lea instruction */
  3,                /* variable shift costs */
  2,                /* constant shift costs */
  {12, 12, 12, 12, 12},  /* cost of starting a multiply */
  1,                /* cost of multiply per each bit set */
  {40, 40, 40, 40, 40},  /* cost of a divide/mod */
  3,                /* cost of movsx */
  2,                /* cost of movzx */
  15,               /* "large" insn */
  4,                /* cost for loading QImode using movzbl */
  {2, 4, 2},        /* cost of loading integer registers
                       in QImode, HImode and SImode.
                       Relative to reg-reg move (2).  */
  {2, 4, 2},        /* cost of storing integer registers */
  2,                /* cost of reg,reg fld/fst */
  {8, 8, 8},        /* cost of loading fp registers
                       in SFmode, DFmode and XFmode */
  {8, 8, 8},        /* cost of storing fp registers
                       in SFmode, DFmode and XFmode */
  2,                /* cost of moving MMX register */
  {4, 8},           /* cost of loading MMX registers
                       in SImode and DImode */
  {4, 8},           /* cost of storing MMX registers
                       in SImode and DImode */
  2,                /* cost of moving SSE register */
  {4, 8, 16},       /* cost of loading SSE registers
                       in SImode, DImode and TImode */
  {4, 8, 16},       /* cost of storing SSE registers
                       in SImode, DImode and TImode */
  3,                /* MMX or SSE register to integer */
  0,                /* size of prefetch block */
  0,                /* number of parallel prefetches */
  8,                /* cost of FADD and FSUB insns.  */
  16,               /* cost of FMUL instruction.  */
  73,               /* cost of FDIV instruction.  */
  3,                /* cost of FABS instruction.  */
  3,                /* cost of FCHS instruction.  */
  83,               /* cost of FSQRT instruction.  */
};
static const
struct processor_costs pentium_cost = {
  1,                /* cost of an add instruction */
  1,                /* cost of a lea instruction */
  4,                /* variable shift costs */
  1,                /* constant shift costs */
  {11, 11, 11, 11, 11},  /* cost of starting a multiply */
  0,                /* cost of multiply per each bit set */
  {25, 25, 25, 25, 25},  /* cost of a divide/mod */
  3,                /* cost of movsx */
  2,                /* cost of movzx */
  8,                /* "large" insn */
  6,                /* cost for loading QImode using movzbl */
  {2, 4, 2},        /* cost of loading integer registers
                       in QImode, HImode and SImode.
                       Relative to reg-reg move (2).  */
  {2, 4, 2},        /* cost of storing integer registers */
  2,                /* cost of reg,reg fld/fst */
  {2, 2, 6},        /* cost of loading fp registers
                       in SFmode, DFmode and XFmode */
  {4, 4, 6},        /* cost of storing fp registers
                       in SFmode, DFmode and XFmode */
  8,                /* cost of moving MMX register */
  {8, 8},           /* cost of loading MMX registers
                       in SImode and DImode */
  {8, 8},           /* cost of storing MMX registers
                       in SImode and DImode */
  2,                /* cost of moving SSE register */
  {4, 8, 16},       /* cost of loading SSE registers
                       in SImode, DImode and TImode */
  {4, 8, 16},       /* cost of storing SSE registers
                       in SImode, DImode and TImode */
  3,                /* MMX or SSE register to integer */
  0,                /* size of prefetch block */
  0,                /* number of parallel prefetches */
  3,                /* cost of FADD and FSUB insns.  */
  3,                /* cost of FMUL instruction.  */
  39,               /* cost of FDIV instruction.  */
  1,                /* cost of FABS instruction.  */
  1,                /* cost of FCHS instruction.  */
  70,               /* cost of FSQRT instruction.  */
};
static const
struct processor_costs pentiumpro_cost = {
  1,                /* cost of an add instruction */
  1,                /* cost of a lea instruction */
  1,                /* variable shift costs */
  1,                /* constant shift costs */
  {4, 4, 4, 4, 4},  /* cost of starting a multiply */
  0,                /* cost of multiply per each bit set */
  {17, 17, 17, 17, 17},  /* cost of a divide/mod */
  1,                /* cost of movsx */
  1,                /* cost of movzx */
  8,                /* "large" insn */
  2,                /* cost for loading QImode using movzbl */
  {4, 4, 4},        /* cost of loading integer registers
                       in QImode, HImode and SImode.
                       Relative to reg-reg move (2).  */
  {2, 2, 2},        /* cost of storing integer registers */
  2,                /* cost of reg,reg fld/fst */
  {2, 2, 6},        /* cost of loading fp registers
                       in SFmode, DFmode and XFmode */
  {4, 4, 6},        /* cost of storing fp registers
                       in SFmode, DFmode and XFmode */
  2,                /* cost of moving MMX register */
  {2, 2},           /* cost of loading MMX registers
                       in SImode and DImode */
  {2, 2},           /* cost of storing MMX registers
                       in SImode and DImode */
  2,                /* cost of moving SSE register */
  {2, 2, 8},        /* cost of loading SSE registers
                       in SImode, DImode and TImode */
  {2, 2, 8},        /* cost of storing SSE registers
                       in SImode, DImode and TImode */
  3,                /* MMX or SSE register to integer */
  32,               /* size of prefetch block */
  6,                /* number of parallel prefetches */
  3,                /* cost of FADD and FSUB insns.  */
  5,                /* cost of FMUL instruction.  */
  56,               /* cost of FDIV instruction.  */
  2,                /* cost of FABS instruction.  */
  2,                /* cost of FCHS instruction.  */
  56,               /* cost of FSQRT instruction.  */
};
static const
struct processor_costs k6_cost = {
  1,                /* cost of an add instruction */
  2,                /* cost of a lea instruction */
  1,                /* variable shift costs */
  1,                /* constant shift costs */
  {3, 3, 3, 3, 3},  /* cost of starting a multiply */
  0,                /* cost of multiply per each bit set */
  {18, 18, 18, 18, 18},  /* cost of a divide/mod */
  2,                /* cost of movsx */
  2,                /* cost of movzx */
  8,                /* "large" insn */
  3,                /* cost for loading QImode using movzbl */
  {4, 5, 4},        /* cost of loading integer registers
                       in QImode, HImode and SImode.
                       Relative to reg-reg move (2).  */
  {2, 3, 2},        /* cost of storing integer registers */
  4,                /* cost of reg,reg fld/fst */
  {6, 6, 6},        /* cost of loading fp registers
                       in SFmode, DFmode and XFmode */
  {4, 4, 4},        /* cost of storing fp registers
                       in SFmode, DFmode and XFmode */
  2,                /* cost of moving MMX register */
  {2, 2},           /* cost of loading MMX registers
                       in SImode and DImode */
  {2, 2},           /* cost of storing MMX registers
                       in SImode and DImode */
  2,                /* cost of moving SSE register */
  {2, 2, 8},        /* cost of loading SSE registers
                       in SImode, DImode and TImode */
  {2, 2, 8},        /* cost of storing SSE registers
                       in SImode, DImode and TImode */
  6,                /* MMX or SSE register to integer */
  32,               /* size of prefetch block */
  1,                /* number of parallel prefetches */
  2,                /* cost of FADD and FSUB insns.  */
  2,                /* cost of FMUL instruction.  */
  56,               /* cost of FDIV instruction.  */
  2,                /* cost of FABS instruction.  */
  2,                /* cost of FCHS instruction.  */
  56,               /* cost of FSQRT instruction.  */
};
static const
struct processor_costs athlon_cost = {
  1,                /* cost of an add instruction */
  2,                /* cost of a lea instruction */
  1,                /* variable shift costs */
  1,                /* constant shift costs */
  {5, 5, 5, 5, 5},  /* cost of starting a multiply */
  0,                /* cost of multiply per each bit set */
  {18, 26, 42, 74, 74},  /* cost of a divide/mod */
  1,                /* cost of movsx */
  1,                /* cost of movzx */
  8,                /* "large" insn */
  4,                /* cost for loading QImode using movzbl */
  {3, 4, 3},        /* cost of loading integer registers
                       in QImode, HImode and SImode.
                       Relative to reg-reg move (2).  */
  {3, 4, 3},        /* cost of storing integer registers */
  4,                /* cost of reg,reg fld/fst */
  {4, 4, 12},       /* cost of loading fp registers
                       in SFmode, DFmode and XFmode */
  {6, 6, 8},        /* cost of storing fp registers
                       in SFmode, DFmode and XFmode */
  2,                /* cost of moving MMX register */
  {4, 4},           /* cost of loading MMX registers
                       in SImode and DImode */
  {4, 4},           /* cost of storing MMX registers
                       in SImode and DImode */
  2,                /* cost of moving SSE register */
  {4, 4, 6},        /* cost of loading SSE registers
                       in SImode, DImode and TImode */
  {4, 4, 5},        /* cost of storing SSE registers
                       in SImode, DImode and TImode */
  5,                /* MMX or SSE register to integer */
  64,               /* size of prefetch block */
  6,                /* number of parallel prefetches */
  4,                /* cost of FADD and FSUB insns.  */
  4,                /* cost of FMUL instruction.  */
  24,               /* cost of FDIV instruction.  */
  2,                /* cost of FABS instruction.  */
  2,                /* cost of FCHS instruction.  */
  35,               /* cost of FSQRT instruction.  */
};
static const
struct processor_costs k8_cost = {
  1,                /* cost of an add instruction */
  2,                /* cost of a lea instruction */
  1,                /* variable shift costs */
  1,                /* constant shift costs */
  {3, 4, 3, 4, 5},  /* cost of starting a multiply */
  0,                /* cost of multiply per each bit set */
  {18, 26, 42, 74, 74},  /* cost of a divide/mod */
  1,                /* cost of movsx */
  1,                /* cost of movzx */
  8,                /* "large" insn */
  4,                /* cost for loading QImode using movzbl */
  {3, 4, 3},        /* cost of loading integer registers
                       in QImode, HImode and SImode.
                       Relative to reg-reg move (2).  */
  {3, 4, 3},        /* cost of storing integer registers */
  4,                /* cost of reg,reg fld/fst */
  {4, 4, 12},       /* cost of loading fp registers
                       in SFmode, DFmode and XFmode */
  {6, 6, 8},        /* cost of storing fp registers
                       in SFmode, DFmode and XFmode */
  2,                /* cost of moving MMX register */
  {3, 3},           /* cost of loading MMX registers
                       in SImode and DImode */
  {4, 4},           /* cost of storing MMX registers
                       in SImode and DImode */
  2,                /* cost of moving SSE register */
  {4, 3, 6},        /* cost of loading SSE registers
                       in SImode, DImode and TImode */
  {4, 4, 5},        /* cost of storing SSE registers
                       in SImode, DImode and TImode */
  5,                /* MMX or SSE register to integer */
  64,               /* size of prefetch block */
  6,                /* number of parallel prefetches */
  4,                /* cost of FADD and FSUB insns.  */
  4,                /* cost of FMUL instruction.  */
  19,               /* cost of FDIV instruction.  */
  2,                /* cost of FABS instruction.  */
  2,                /* cost of FCHS instruction.  */
  35,               /* cost of FSQRT instruction.  */
};
static const
struct processor_costs pentium4_cost = {
  1,                /* cost of an add instruction */
  3,                /* cost of a lea instruction */
  4,                /* variable shift costs */
  4,                /* constant shift costs */
  {15, 15, 15, 15, 15},  /* cost of starting a multiply */
  0,                /* cost of multiply per each bit set */
  {56, 56, 56, 56, 56},  /* cost of a divide/mod */
  1,                /* cost of movsx */
  1,                /* cost of movzx */
  16,               /* "large" insn */
  2,                /* cost for loading QImode using movzbl */
  {4, 5, 4},        /* cost of loading integer registers
                       in QImode, HImode and SImode.
                       Relative to reg-reg move (2).  */
  {2, 3, 2},        /* cost of storing integer registers */
  2,                /* cost of reg,reg fld/fst */
  {2, 2, 6},        /* cost of loading fp registers
                       in SFmode, DFmode and XFmode */
  {4, 4, 6},        /* cost of storing fp registers
                       in SFmode, DFmode and XFmode */
  2,                /* cost of moving MMX register */
  {2, 2},           /* cost of loading MMX registers
                       in SImode and DImode */
  {2, 2},           /* cost of storing MMX registers
                       in SImode and DImode */
  12,               /* cost of moving SSE register */
  {12, 12, 12},     /* cost of loading SSE registers
                       in SImode, DImode and TImode */
  {2, 2, 8},        /* cost of storing SSE registers
                       in SImode, DImode and TImode */
  10,               /* MMX or SSE register to integer */
  64,               /* size of prefetch block */
  6,                /* number of parallel prefetches */
  5,                /* cost of FADD and FSUB insns.  */
  7,                /* cost of FMUL instruction.  */
  43,               /* cost of FDIV instruction.  */
  2,                /* cost of FABS instruction.  */
  2,                /* cost of FCHS instruction.  */
  43,               /* cost of FSQRT instruction.  */
};
static const
struct processor_costs nocona_cost = {
  1,                /* cost of an add instruction */
  1,                /* cost of a lea instruction */
  1,                /* variable shift costs */
  1,                /* constant shift costs */
  {10, 10, 10, 10, 10},  /* cost of starting a multiply */
  0,                /* cost of multiply per each bit set */
  {66, 66, 66, 66, 66},  /* cost of a divide/mod */
  1,                /* cost of movsx */
  1,                /* cost of movzx */
  16,               /* "large" insn */
  4,                /* cost for loading QImode using movzbl */
  {4, 4, 4},        /* cost of loading integer registers
                       in QImode, HImode and SImode.
                       Relative to reg-reg move (2).  */
  {4, 4, 4},        /* cost of storing integer registers */
  3,                /* cost of reg,reg fld/fst */
  {12, 12, 12},     /* cost of loading fp registers
                       in SFmode, DFmode and XFmode */
  {4, 4, 4},        /* cost of storing fp registers
                       in SFmode, DFmode and XFmode */
  6,                /* cost of moving MMX register */
  {12, 12},         /* cost of loading MMX registers
                       in SImode and DImode */
  {12, 12},         /* cost of storing MMX registers
                       in SImode and DImode */
  6,                /* cost of moving SSE register */
  {12, 12, 12},     /* cost of loading SSE registers
                       in SImode, DImode and TImode */
  {12, 12, 12},     /* cost of storing SSE registers
                       in SImode, DImode and TImode */
  8,                /* MMX or SSE register to integer */
  128,              /* size of prefetch block */
  8,                /* number of parallel prefetches */
  6,                /* cost of FADD and FSUB insns.  */
  8,                /* cost of FMUL instruction.  */
  40,               /* cost of FDIV instruction.  */
  3,                /* cost of FABS instruction.  */
  3,                /* cost of FCHS instruction.  */
  44,               /* cost of FSQRT instruction.  */
};
const struct processor_costs *ix86_cost = &pentium_cost;
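/* Illustrative sketch (not part of the original file): MODE_INDEX keys
   the 5-entry cost arrays above, and ix86_cost points at the table for
   the active -mtune target.  The field name mult_init is assumed from
   the struct processor_costs definition in i386.h.  */
#if 0
static inline int
example_mult_init_cost (enum machine_mode mode)
{
  /* E.g. MODE_INDEX (SImode) == 2, so this picks the third entry of
     the "cost of starting a multiply" array.  */
  return ix86_cost->mult_init[MODE_INDEX (mode)];
}
#endif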
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_K6 (1<<PROCESSOR_K6)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_zero_extend_with_and = m_486 | m_PENT;
const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
const int x86_double_with_add = ~m_386;
const int x86_use_bit_test = m_386;
const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_3dnow_a = m_ATHLON_K8;
const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
/* Branch hints were put in P4 based on simulation results.  But after
   P4 was made, no performance benefit was observed with branch hints.
   They also increase code size.  As a result, icc never generates
   branch hints.  */
const int x86_branch_hints = 0;
const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
const int x86_partial_reg_stall = m_PPRO;
const int x86_use_loop = m_K6;
const int x86_use_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
const int x86_use_mov0 = m_K6;
const int x86_use_cltd = ~(m_PENT | m_K6);
const int x86_read_modify_write = ~m_PENT;
const int x86_read_modify = ~(m_PENT | m_PPRO);
const int x86_split_long_moves = m_PPRO;
const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
const int x86_qimode_math = ~(0);
const int x86_promote_qi_regs = 0;
const int x86_himode_math = ~(m_PPRO);
const int x86_promote_hi_regs = m_PPRO;
const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
const int x86_decompose_lea = m_PENT4 | m_NOCONA;
const int x86_shift1 = ~m_486;
const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
/* Set for machines where the type and dependencies are resolved on SSE
   register parts instead of whole registers, so we may maintain just
   lower part of scalar values in proper format leaving the upper part
   undefined.  */
const int x86_sse_split_regs = m_ATHLON_K8;
const int x86_sse_typeless_stores = m_ATHLON_K8;
const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
const int x86_use_ffreep = m_ATHLON_K8;
const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
const int x86_inter_unit_moves = ~(m_ATHLON_K8);
const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
/* Some CPU cores are not able to predict more than 4 branch instructions in
   the 16 byte window.  */
const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
const int x86_use_bt = m_ATHLON_K8;
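/* Illustrative sketch (not part of the original file): each bitmask
   above is tested against the bit of the active tuning target.  i386.h
   is assumed to define TUNEMASK as (1 << ix86_tune) and to wrap the
   vectors in TARGET_* macros along these lines.  */
#if 0
#define EXAMPLE_TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)
/* So with -mtune=k6, EXAMPLE_TARGET_USE_LEAVE is nonzero because
   x86_use_leave includes m_K6 == (1 << PROCESSOR_K6).  */
#endif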
/* In case the average insn count for single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue code.  */
#define FAST_PROLOGUE_INSN_COUNT 20
/* Names for 8 (low), 8 (high), and 16-bit registers, respectively.  */
static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  /* ax, dx, cx, bx */
  AREG, DREG, CREG, BREG,
  /* si, di, bp, sp */
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  /* FP registers */
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* arg pointer */
  NON_Q_REGS,
  /* flags, fpsr, dirflag, frame */
  NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  MMX_REGS, MMX_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS,
};
/* The "default" register map used in 32bit mode.  */

int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5,          /* general regs */
  12, 13, 14, 15, 16, 17, 18, 19,  /* fp regs */
  -1, -1, -1, -1, -1,              /* arg, flags, fpsr, dir, frame */
  21, 22, 23, 24, 25, 26, 27, 28,  /* SSE */
  29, 30, 31, 32, 33, 34, 35, 36,  /* MMX */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* extended SSE registers */
};
static int const x86_64_int_parameter_registers[6] =
{
  5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
  FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
};

static int const x86_64_int_return_registers[4] =
{
  0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
};
/* The "default" register map used in 64bit mode.  */

int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7,          /* general regs */
  33, 34, 35, 36, 37, 38, 39, 40,  /* fp regs */
  -1, -1, -1, -1, -1,              /* arg, flags, fpsr, dir, frame */
  17, 18, 19, 20, 21, 22, 23, 24,  /* SSE */
  41, 42, 43, 44, 45, 46, 47, 48,  /* MMX */
  8, 9, 10, 11, 12, 13, 14, 15,    /* extended integer registers */
  25, 26, 27, 28, 29, 30, 31, 32,  /* extended SSE registers */
};
/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believes these numbers have these meanings.
	8 for %eip (no gcc equivalent)
	9 for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 is so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.
   The version of x86/svr4 SDB I have at the moment does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seems to under-
   stand that it should say that a variable lives in %st(0) (when
   asked via an `=' command) if we said it was in DWARF regno 11,
   but SDB still prints garbage when asked for the value of the
   variable in question (via a `/' command).
   (Also note that the labels SDB prints for various FP stack regs
   when doing an `x' command are all wrong.)
   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.
   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers.
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
*/
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4,          /* general regs */
  11, 12, 13, 14, 15, 16, 17, 18,  /* fp regs */
  -1, 9, -1, -1, -1,               /* arg, flags, fpsr, dir, frame */
  21, 22, 23, 24, 25, 26, 27, 28,  /* SSE registers */
  29, 30, 31, 32, 33, 34, 35, 36,  /* MMX registers */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* extended SSE registers */
};
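/* Illustrative check (not part of the original file): gcc regno 1 is
   %edx (see regclass_map above), and the SVR4 numbering in the comment
   assigns %edx the DWARF number 2, so the table must satisfy:  */
#if 0
static void
example_check_svr4_map (void)
{
  gcc_assert (svr4_dbx_register_map[1] == 2);   /* %edx */
  gcc_assert (svr4_dbx_register_map[8] == 11);  /* %st(0) */
}
#endif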
/* Test and compare insns in i386.md store the information needed to
   generate branch and scc insns here.  */

rtx ix86_compare_op0 = NULL_RTX;
rtx ix86_compare_op1 = NULL_RTX;

#define MAX_386_STACK_LOCALS 3
/* Size of the register save area.  */
#define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
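/* Worked example (assuming the 64-bit ABI values REGPARM_MAX == 6 and
   SSE_REGPARM_MAX == 8 from i386.h, with UNITS_PER_WORD == 8): the
   register save area is 6 * 8 + 8 * 16 = 176 bytes.  */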
/* Define the structure for the machine field in struct function.  */

struct stack_local_entry GTY(())
{
  unsigned short mode;
  unsigned short n;
  rtx rtl;
  struct stack_local_entry *next;
};
/* Structure describing stack frame layout.
   Stack grows downward:

   saved frame pointer if frame_pointer_needed
					      <- HARD_FRAME_POINTER
   [saved regs and allocated frame]
		> to_allocate		      <- FRAME_POINTER
  */
struct ix86_frame
{
  int outgoing_arguments_size;

  HOST_WIDE_INT to_allocate;
  /* The offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT hard_frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* When save_regs_using_mov is set, emit prologue using
     move instead of push instructions.  */
  bool save_regs_using_mov;
};
/* Used to enable/disable debugging features.  */
const char *ix86_debug_arg_string, *ix86_debug_addr_string;

/* Code model option as passed by user.  */
const char *ix86_cmodel_string;
/* Parsed value.  */
enum cmodel ix86_cmodel;
/* Asm dialect.  */
const char *ix86_asm_string;
enum asm_dialect ix86_asm_dialect = ASM_ATT;
/* TLS dialect.  */
const char *ix86_tls_dialect_string;
enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;

/* Which unit we are generating floating point math for.  */
enum fpmath_unit ix86_fpmath;

/* Which cpu are we scheduling for.  */
enum processor_type ix86_tune;
/* Which instruction set architecture to use.  */
enum processor_type ix86_arch;

/* Strings to hold which cpu and instruction set architecture to use.  */
const char *ix86_tune_string;    /* for -mtune=<xxx> */
const char *ix86_arch_string;    /* for -march=<xxx> */
const char *ix86_fpmath_string;  /* for -mfpmath=<xxx> */

/* # of registers to use to pass arguments.  */
const char *ix86_regparm_string;

/* true if sse prefetch instruction is not NOOP.  */
int x86_prefetch_sse;

/* ix86_regparm_string as a number */
static int ix86_regparm;

/* Alignment to use for loops and jumps:  */

/* Power of two alignment for loops.  */
const char *ix86_align_loops_string;

/* Power of two alignment for non-loop jumps.  */
const char *ix86_align_jumps_string;

/* Power of two alignment for stack boundary in bytes.  */
const char *ix86_preferred_stack_boundary_string;

/* Preferred alignment for stack boundary in bits.  */
unsigned int ix86_preferred_stack_boundary;

/* Values 1-5: see jump.c */
int ix86_branch_cost;
const char *ix86_branch_cost_string;

/* Power of two alignment for functions.  */
const char *ix86_align_funcs_string;

/* Prefix built by ASM_GENERATE_INTERNAL_LABEL.  */
char internal_label_prefix[16];
int internal_label_prefix_len;
static void output_pic_addr_const (FILE *, rtx, int);
static void put_condition_code (enum rtx_code, enum machine_mode,
				int, int, FILE *);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
						   rtx *);
static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
						   enum machine_mode);
static rtx get_thread_pointer (int);
static rtx legitimize_tls_address (rtx, enum tls_model, int);
static void get_pc_thunk_name (char [32], unsigned int);
static rtx gen_push (rtx);
static int ix86_flags_dependant (rtx, rtx, enum attr_type);
static int ix86_agi_dependant (rtx, rtx, enum attr_type);
static struct machine_function * ix86_init_machine_status (void);
static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
static int ix86_nsaved_regs (void);
static void ix86_emit_save_regs (void);
static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
static HOST_WIDE_INT ix86_GOT_alias_set (void);
static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
static rtx ix86_expand_aligntest (rtx, int);
static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
static int ix86_issue_rate (void);
static int ix86_adjust_cost (rtx, rtx, rtx, int);
static int ia32_multipass_dfa_lookahead (void);
static void ix86_init_mmx_sse_builtins (void);
static rtx x86_this_parameter (tree);
static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				 HOST_WIDE_INT, tree);
static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void x86_file_start (void);
static void ix86_reorg (void);
static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
static tree ix86_build_builtin_va_list (void);
static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					 tree, int *, int);
static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
static bool ix86_vector_mode_supported_p (enum machine_mode);

static int ix86_address_cost (rtx);
static bool ix86_cannot_force_const_mem (rtx);
static rtx ix86_delegitimize_address (rtx);

struct builtin_description;
static rtx ix86_expand_sse_comi (const struct builtin_description *,
				 tree, rtx);
static rtx ix86_expand_sse_compare (const struct builtin_description *,
				    tree, rtx);
static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_store_builtin (enum insn_code, tree);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
static int ix86_fp_comparison_cost (enum rtx_code code);
static unsigned int ix86_select_alt_pic_regnum (void);
static int ix86_save_reg (unsigned int, int);
static void ix86_compute_frame_layout (struct ix86_frame *);
static int ix86_comp_type_attributes (tree, tree);
static int ix86_function_regparm (tree, tree);
const struct attribute_spec ix86_attribute_table[];
static bool ix86_function_ok_for_sibcall (tree, tree);
static tree ix86_handle_cdecl_attribute (tree *, tree, tree, int, bool *);
static tree ix86_handle_regparm_attribute (tree *, tree, tree, int, bool *);
static int ix86_value_regno (enum machine_mode);
static bool contains_128bit_aligned_vector_p (tree);
static rtx ix86_struct_value_rtx (tree, int);
static bool ix86_ms_bitfield_layout_p (tree);
static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
static int extended_reg_mentioned_1 (rtx *, void *);
static bool ix86_rtx_costs (rtx, int, int, int *);
static int min_insn_size (rtx);
static tree ix86_md_asm_clobbers (tree clobbers);
static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				    tree, bool);
static void ix86_init_builtins (void);
static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);

/* This function is only used on Solaris.  */
static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
     ATTRIBUTE_UNUSED;
/* Register class used for passing given 64bit part of the argument.
   These represent classes as documented by the PS ABI, with the exception
   of SSESF, SSEDF classes, that are basically SSE class, just gcc will
   use SF or DFmode move instead of DImode to avoid reformatting penalties.

   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
   whenever possible (upper half does contain padding).  */
enum x86_64_reg_class
  {
    X86_64_NO_CLASS,
    X86_64_INTEGER_CLASS,
    X86_64_INTEGERSI_CLASS,
    X86_64_SSE_CLASS,
    X86_64_SSESF_CLASS,
    X86_64_SSEDF_CLASS,
    X86_64_SSEUP_CLASS,
    X86_64_X87_CLASS,
    X86_64_X87UP_CLASS,
    X86_64_COMPLEX_X87_CLASS,
    X86_64_MEMORY_CLASS
  };
static const char * const x86_64_reg_class_name[] = {
  "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
  "sseup", "x87", "x87up", "cplx87", "no"
};

#define MAX_CLASSES 4
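/* Illustrative classifications (a sketch of the psABI rules this enum
   supports, not part of the original file): an argument is split into
   eightbytes and each is classified separately, e.g.

     long double                -> {X86_64_X87_CLASS, X86_64_X87UP_CLASS}
     struct {double d; long l;} -> {X86_64_SSEDF_CLASS, X86_64_INTEGER_CLASS}

   MAX_CLASSES bounds how many such eightbyte classes one argument may
   produce.  */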
/* Table of constants used by fldpi, fldln2, etc....  */
static REAL_VALUE_TYPE ext_80387_constants_table [5];
static bool ext_80387_constants_init = 0;
static void init_ext_80387_constants (void);
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#  undef TARGET_MERGE_DECL_ATTRIBUTES
#  define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
struct gcc_target targetm = TARGET_INITIALIZER;

/* The svr4 ABI for the i386 says that records and unions are returned
   in memory.  */
#ifndef DEFAULT_PCC_STRUCT_RETURN
#define DEFAULT_PCC_STRUCT_RETURN 1
#endif
/* Sometimes certain combinations of command options do not make
   sense on a particular target machine.  You can define a macro
   `OVERRIDE_OPTIONS' to take account of this.  This macro, if
   defined, is executed once just after all the command options have
   been parsed.

   Don't use this macro to turn on various extra optimizations for
   `-O'.  That is what `OPTIMIZATION_OPTIONS' is for.  */

void
override_options (void)
{
  int i;
  int ix86_tune_defaulted = 0;

  /* Comes from final.c -- no real reason to change it.  */
#define MAX_CODE_ALIGN 16
  static struct ptt
    {
      const struct processor_costs *cost;  /* Processor costs */
      const int target_enable;             /* Target flags to enable.  */
      const int target_disable;            /* Target flags to disable.  */
      const int align_loop;                /* Default alignments.  */
      const int align_loop_max_skip;
      const int align_jump;
      const int align_jump_max_skip;
      const int align_func;
    }
  const processor_target_table[PROCESSOR_max] =
    {
      {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
      {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
      {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
      {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
      {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
      {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
      {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
      {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
      {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
    };
  static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;

  static struct pta
    {
      const char *const name;  /* processor name or nickname.  */
      const enum processor_type processor;
      const enum pta_flags
	{
	  PTA_SSE = 1,
	  PTA_SSE2 = 2,
	  PTA_SSE3 = 4,
	  PTA_MMX = 8,
	  PTA_PREFETCH_SSE = 16,
	  PTA_3DNOW = 32,
	  PTA_3DNOW_A = 64,
	  PTA_64BIT = 128
	} flags;
    }
  const processor_alias_table[] =
    {
      {"i386", PROCESSOR_I386, 0},
      {"i486", PROCESSOR_I486, 0},
      {"i586", PROCESSOR_PENTIUM, 0},
      {"pentium", PROCESSOR_PENTIUM, 0},
      {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
      {"winchip-c6", PROCESSOR_I486, PTA_MMX},
      {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
      {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
      {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
      {"i686", PROCESSOR_PENTIUMPRO, 0},
      {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
      {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
      {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
      {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
      {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
      {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
				       | PTA_MMX | PTA_PREFETCH_SSE},
      {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
					| PTA_MMX | PTA_PREFETCH_SSE},
      {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
				     | PTA_MMX | PTA_PREFETCH_SSE},
      {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
				   | PTA_MMX | PTA_PREFETCH_SSE},
      {"k6", PROCESSOR_K6, PTA_MMX},
      {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
      {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
      {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				   | PTA_3DNOW_A},
      {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
					 | PTA_3DNOW | PTA_3DNOW_A},
      {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				     | PTA_3DNOW_A | PTA_SSE},
      {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				      | PTA_3DNOW_A | PTA_SSE},
      {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				      | PTA_3DNOW_A | PTA_SSE},
      {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
			       | PTA_SSE | PTA_SSE2},
      {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
			   | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				| PTA_64BIT | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				 | PTA_64BIT | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				  | PTA_64BIT | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
    };
  int const pta_size = ARRAY_SIZE (processor_alias_table);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* Set the default values for switches whose default depends on TARGET_64BIT
     in case they weren't overwritten by command line options.  */
  if (TARGET_64BIT)
    {
      if (flag_omit_frame_pointer == 2)
	flag_omit_frame_pointer = 1;
      if (flag_asynchronous_unwind_tables == 2)
	flag_asynchronous_unwind_tables = 1;
      if (flag_pcc_struct_return == 2)
	flag_pcc_struct_return = 0;
    }
  else
    {
      if (flag_omit_frame_pointer == 2)
	flag_omit_frame_pointer = 0;
      if (flag_asynchronous_unwind_tables == 2)
	flag_asynchronous_unwind_tables = 0;
      if (flag_pcc_struct_return == 2)
	flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
    }
  if (!ix86_tune_string && ix86_arch_string)
    ix86_tune_string = ix86_arch_string;
  if (!ix86_tune_string)
    {
      ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
      ix86_tune_defaulted = 1;
    }
  if (!ix86_arch_string)
    ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";

  if (ix86_cmodel_string != 0)
    {
      if (!strcmp (ix86_cmodel_string, "small"))
	ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
      else if (flag_pic)
	sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
      else if (!strcmp (ix86_cmodel_string, "32"))
	ix86_cmodel = CM_32;
      else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
	ix86_cmodel = CM_KERNEL;
      else if (!strcmp (ix86_cmodel_string, "medium") && !flag_pic)
	ix86_cmodel = CM_MEDIUM;
      else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
	ix86_cmodel = CM_LARGE;
      else
	error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
    }
  else
    {
      ix86_cmodel = CM_32;
      if (TARGET_64BIT)
	ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
    }
  if (ix86_asm_string != 0)
    {
      if (!strcmp (ix86_asm_string, "intel"))
	ix86_asm_dialect = ASM_INTEL;
      else if (!strcmp (ix86_asm_string, "att"))
	ix86_asm_dialect = ASM_ATT;
      else
	error ("bad value (%s) for -masm= switch", ix86_asm_string);
    }
  if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
    error ("code model %qs not supported in the %s bit mode",
	   ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
  if (ix86_cmodel == CM_LARGE)
    sorry ("code model %<large%> not supported yet");
  if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
    sorry ("%i-bit mode not compiled in",
	   (target_flags & MASK_64BIT) ? 64 : 32);
  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
      {
	ix86_arch = processor_alias_table[i].processor;
	/* Default cpu tuning to the architecture.  */
	ix86_tune = ix86_arch;
	if (processor_alias_table[i].flags & PTA_MMX
	    && !(target_flags_explicit & MASK_MMX))
	  target_flags |= MASK_MMX;
	if (processor_alias_table[i].flags & PTA_3DNOW
	    && !(target_flags_explicit & MASK_3DNOW))
	  target_flags |= MASK_3DNOW;
	if (processor_alias_table[i].flags & PTA_3DNOW_A
	    && !(target_flags_explicit & MASK_3DNOW_A))
	  target_flags |= MASK_3DNOW_A;
	if (processor_alias_table[i].flags & PTA_SSE
	    && !(target_flags_explicit & MASK_SSE))
	  target_flags |= MASK_SSE;
	if (processor_alias_table[i].flags & PTA_SSE2
	    && !(target_flags_explicit & MASK_SSE2))
	  target_flags |= MASK_SSE2;
	if (processor_alias_table[i].flags & PTA_SSE3
	    && !(target_flags_explicit & MASK_SSE3))
	  target_flags |= MASK_SSE3;
	if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
	  x86_prefetch_sse = true;
	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
	  error ("CPU you selected does not support x86-64 "
		 "instruction set");
	break;
      }

  if (i == pta_size)
    error ("bad value (%s) for -march= switch", ix86_arch_string);
  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
      {
	ix86_tune = processor_alias_table[i].processor;
	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
	  {
	    if (ix86_tune_defaulted)
	      {
		ix86_tune_string = "x86-64";
		for (i = 0; i < pta_size; i++)
		  if (! strcmp (ix86_tune_string,
				processor_alias_table[i].name))
		    break;
		ix86_tune = processor_alias_table[i].processor;
	      }
	    else
	      error ("CPU you selected does not support x86-64 "
		     "instruction set");
	  }
	/* Intel CPUs have always interpreted SSE prefetch instructions as
	   NOPs; so, we can enable SSE prefetch instructions even when
	   -mtune (rather than -march) points us to a processor that has them.
	   However, the VIA C3 gives a SIGILL, so we only do that for i686 and
	   higher processors.  */
	if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
	  x86_prefetch_sse = true;
	break;
      }
  if (i == pta_size)
    error ("bad value (%s) for -mtune= switch", ix86_tune_string);
  if (optimize_size)
    ix86_cost = &size_cost;
  else
    ix86_cost = processor_target_table[ix86_tune].cost;
  target_flags |= processor_target_table[ix86_tune].target_enable;
  target_flags &= ~processor_target_table[ix86_tune].target_disable;

  /* Arrange to set up i386_stack_locals for all functions.  */
  init_machine_status = ix86_init_machine_status;
  /* Validate -mregparm= value.  */
  if (ix86_regparm_string)
    {
      i = atoi (ix86_regparm_string);
      if (i < 0 || i > REGPARM_MAX)
	error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
      else
	ix86_regparm = i;
    }
  if (TARGET_64BIT)
    ix86_regparm = REGPARM_MAX;
  /* If the user has provided any of the -malign-* options,
     warn and use that value only if -falign-* is not set.
     Remove this code in GCC 3.2 or later.  */
  if (ix86_align_loops_string)
    {
      warning ("-malign-loops is obsolete, use -falign-loops");
      if (align_loops == 0)
	{
	  i = atoi (ix86_align_loops_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_loops = 1 << i;
	}
    }

  if (ix86_align_jumps_string)
    {
      warning ("-malign-jumps is obsolete, use -falign-jumps");
      if (align_jumps == 0)
	{
	  i = atoi (ix86_align_jumps_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_jumps = 1 << i;
	}
    }

  if (ix86_align_funcs_string)
    {
      warning ("-malign-functions is obsolete, use -falign-functions");
      if (align_functions == 0)
	{
	  i = atoi (ix86_align_funcs_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_functions = 1 << i;
	}
    }
  /* Default align_* from the processor table.  */
  if (align_loops == 0)
    {
      align_loops = processor_target_table[ix86_tune].align_loop;
      align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
    }
  if (align_jumps == 0)
    {
      align_jumps = processor_target_table[ix86_tune].align_jump;
      align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
    }
  if (align_functions == 0)
    {
      align_functions = processor_target_table[ix86_tune].align_func;
    }
  /* Validate -mpreferred-stack-boundary= value, or provide default.
     The default of 128 bits is for Pentium III's SSE __m128, but we
     don't want additional code to keep the stack aligned when
     optimizing for code size.  */
  ix86_preferred_stack_boundary = (optimize_size
				   ? TARGET_64BIT ? 128 : 32
				   : 128);
  if (ix86_preferred_stack_boundary_string)
    {
      i = atoi (ix86_preferred_stack_boundary_string);
      if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
	error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
	       TARGET_64BIT ? 4 : 2);
      else
	ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
    }
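  /* Worked example: -mpreferred-stack-boundary=4 yields
     (1 << 4) * 8 = 128 bits, i.e. the 16-byte alignment that SSE
     __m128 values need (BITS_PER_UNIT is 8).  */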
  /* Validate -mbranch-cost= value, or provide default.  */
  ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
  if (ix86_branch_cost_string)
    {
      i = atoi (ix86_branch_cost_string);
      if (i < 0 || i > 5)
	error ("-mbranch-cost=%d is not between 0 and 5", i);
      else
	ix86_branch_cost = i;
    }
  if (ix86_tls_dialect_string)
    {
      if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
	ix86_tls_dialect = TLS_DIALECT_GNU;
      else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
	ix86_tls_dialect = TLS_DIALECT_SUN;
      else
	error ("bad value (%s) for -mtls-dialect= switch",
	       ix86_tls_dialect_string);
    }
  /* Keep nonleaf frame pointers.  */
  if (flag_omit_frame_pointer)
    target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
  else if (TARGET_OMIT_LEAF_FRAME_POINTER)
    flag_omit_frame_pointer = 1;

  /* If we're doing fast math, we don't care about comparison order
     wrt NaNs.  This lets us use a shorter comparison sequence.  */
  if (flag_unsafe_math_optimizations)
    target_flags &= ~MASK_IEEE_FP;

  /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
     since the insns won't need emulation.  */
  if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
    target_flags &= ~MASK_NO_FANCY_MATH_387;

  /* Likewise, if the target doesn't have a 387, or we've specified
     software floating point, don't use 387 inline intrinsics.  */
  if (!TARGET_80387)
    target_flags |= MASK_NO_FANCY_MATH_387;
  /* Turn on SSE2 builtins for -msse3.  */
  if (TARGET_SSE3)
    target_flags |= MASK_SSE2;

  /* Turn on SSE builtins for -msse2.  */
  if (TARGET_SSE2)
    target_flags |= MASK_SSE;

  /* Turn on MMX builtins for -msse.  */
  if (TARGET_SSE)
    {
      target_flags |= MASK_MMX & ~target_flags_explicit;
      x86_prefetch_sse = true;
    }

  /* Turn on MMX builtins for 3Dnow.  */
  if (TARGET_3DNOW)
    target_flags |= MASK_MMX;
  if (TARGET_64BIT)
    {
      if (TARGET_ALIGN_DOUBLE)
	error ("-malign-double makes no sense in the 64bit mode");
      if (TARGET_RTD)
	error ("-mrtd calling convention not supported in the 64bit mode");

      /* Enable by default the SSE and MMX builtins.  Do allow the user to
	 explicitly disable any of these.  In particular, disabling SSE and
	 MMX for kernel code is extremely useful.  */
      target_flags
	|= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
	    & ~target_flags_explicit);

      if (TARGET_SSE)
	ix86_fpmath = FPMATH_SSE;
    }
  else
    {
      ix86_fpmath = FPMATH_387;
      /* The i386 ABI does not specify a red zone.  It still makes sense
	 to use one when the programmer takes care to keep the stack from
	 being destroyed.  */
      if (!(target_flags_explicit & MASK_NO_RED_ZONE))
	target_flags |= MASK_NO_RED_ZONE;
    }
  if (ix86_fpmath_string != 0)
    {
      if (! strcmp (ix86_fpmath_string, "387"))
	ix86_fpmath = FPMATH_387;
      else if (! strcmp (ix86_fpmath_string, "sse"))
	{
	  if (!TARGET_SSE)
	    {
	      warning ("SSE instruction set disabled, using 387 arithmetics");
	      ix86_fpmath = FPMATH_387;
	    }
	  else
	    ix86_fpmath = FPMATH_SSE;
	}
      else if (! strcmp (ix86_fpmath_string, "387,sse")
	       || ! strcmp (ix86_fpmath_string, "sse,387"))
	{
	  if (!TARGET_SSE)
	    {
	      warning ("SSE instruction set disabled, using 387 arithmetics");
	      ix86_fpmath = FPMATH_387;
	    }
	  else if (!TARGET_80387)
	    {
	      warning ("387 instruction set disabled, using SSE arithmetics");
	      ix86_fpmath = FPMATH_SSE;
	    }
	  else
	    ix86_fpmath = FPMATH_SSE | FPMATH_387;
	}
      else
	error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
    }
  /* If fpmath doesn't include 387, disable use of x87 intrinsics.  */
  if (! (ix86_fpmath & FPMATH_387))
    target_flags |= MASK_NO_FANCY_MATH_387;
  if ((x86_accumulate_outgoing_args & TUNEMASK)
      && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
      && !optimize_size)
    target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;

  /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix.  */
  {
    char *p;
    ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
    p = strchr (internal_label_prefix, 'X');
    internal_label_prefix_len = p - internal_label_prefix;
    *p = '\0';
  }
  /* When scheduling description is not available, disable scheduler pass
     so it won't slow down the compilation and make x87 code slower.  */
  if (!TARGET_SCHEDULE)
    flag_schedule_insns_after_reload = flag_schedule_insns = 0;
}
void
optimization_options (int level, int size ATTRIBUTE_UNUSED)
{
  /* For -O2 and beyond, turn off -fschedule-insns by default.  It tends to
     make the problem with not enough registers even worse.  */
#ifdef INSN_SCHEDULING
  if (level > 1)
    flag_schedule_insns = 0;
#endif

  /* The default values of these switches depend on TARGET_64BIT,
     which is not known at this moment.  Mark these values with 2 and
     let the user override them.  In case there is no command line
     option specifying them, we will set the defaults in
     override_options.  */
  if (level > 1)
    flag_omit_frame_pointer = 2;
  flag_pcc_struct_return = 2;
  flag_asynchronous_unwind_tables = 2;
#ifdef SUBTARGET_OPTIMIZATION_OPTIONS
  SUBTARGET_OPTIMIZATION_OPTIONS;
#endif
}
1607 /* Table of valid machine attributes. */
1608 const struct attribute_spec ix86_attribute_table[] =
1610 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1611 /* Stdcall attribute says callee is responsible for popping arguments
1612 if they are not variable. */
1613 { "stdcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1614 /* Fastcall attribute says callee is responsible for popping arguments
1615 if they are not variable. */
1616 { "fastcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1617 /* Cdecl attribute says the callee is a normal C declaration */
1618 { "cdecl", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1619 /* Regparm attribute specifies how many integer arguments are to be
1620 passed in registers. */
1621 { "regparm", 1, 1, false, true, true, ix86_handle_regparm_attribute },
1622 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1623 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1624 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1625 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1627 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1628 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1629 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1630 SUBTARGET_ATTRIBUTE_TABLE,
1632 { NULL, 0, 0, false, false, false, NULL }
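/* For reference, user code selects these conventions with the usual
   attribute syntax.  An illustrative sketch (the declarations are made
   up, not part of the compiler):

     int __attribute__((stdcall))    f (int a, int b);  // callee pops args
     int __attribute__((fastcall))   g (int a, int b);  // a, b in ECX, EDX
     int __attribute__((regparm(3))) h (int a, int b, int c);

   The handlers below only validate the attributes; the calling-sequence
   effects are implemented elsewhere in this file.  */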
1635 /* Decide whether we can make a sibling call to a function. DECL is the
1636 declaration of the function being targeted by the call and EXP is the
1637 CALL_EXPR representing the call. */
1640 ix86_function_ok_for_sibcall (tree decl, tree exp)
1642 /* If we are generating position-independent code, we cannot sibcall
1643 optimize any indirect call, or a direct call to a global function,
1644 as the PLT requires %ebx be live. */
1645 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1648 /* If we are returning floats on the 80387 register stack, we cannot
1649 make a sibcall from a function that doesn't return a float to a
1650 function that does or, conversely, from a function that does return
1651 a float to a function that doesn't; the necessary stack adjustment
1652 would not be executed. */
1653 if (STACK_REG_P (ix86_function_value (TREE_TYPE (exp)))
1654 != STACK_REG_P (ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)))))
1657 /* If this call is indirect, we'll need to be able to use a call-clobbered
1658 register for the address of the target function. Make sure that all
1659 such registers are not used for passing parameters. */
1660 if (!decl && !TARGET_64BIT)
1664 /* We're looking at the CALL_EXPR, we need the type of the function. */
1665 type = TREE_OPERAND (exp, 0); /* pointer expression */
1666 type = TREE_TYPE (type); /* pointer type */
1667 type = TREE_TYPE (type); /* function type */
1669 if (ix86_function_regparm (type, NULL) >= 3)
1671 /* ??? Need to count the actual number of registers to be used,
1672 not the possible number of registers. Fix later. */
1677 /* Otherwise okay. That also includes certain types of indirect calls. */
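/* An illustration of the PIC restriction above (a sketch; the names are
   hypothetical):

     extern int global_fn (int);
     static int local_fn (int x) { return x; }

     int caller1 (int x) { return global_fn (x); }  // no sibcall with -fpic:
                                                    // the PLT needs %ebx
     int caller2 (int x) { return local_fn (x); }   // sibcall still possible
*/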
1681 /* Handle a "cdecl", "stdcall", or "fastcall" attribute;
1682 arguments as in struct attribute_spec.handler. */
1684 ix86_handle_cdecl_attribute (tree *node, tree name,
1685 tree args ATTRIBUTE_UNUSED,
1686 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1688 if (TREE_CODE (*node) != FUNCTION_TYPE
1689 && TREE_CODE (*node) != METHOD_TYPE
1690 && TREE_CODE (*node) != FIELD_DECL
1691 && TREE_CODE (*node) != TYPE_DECL)
1693 warning ("%qs attribute only applies to functions",
1694 IDENTIFIER_POINTER (name));
1695 *no_add_attrs = true;
1699 if (is_attribute_p ("fastcall", name))
1701 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
1703 error ("fastcall and stdcall attributes are not compatible");
1705 else if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
1707 error ("fastcall and regparm attributes are not compatible");
1710 else if (is_attribute_p ("stdcall", name))
1712 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1714 error ("fastcall and stdcall attributes are not compatible");
1721 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
1722 *no_add_attrs = true;
1728 /* Handle a "regparm" attribute;
1729 arguments as in struct attribute_spec.handler. */
1731 ix86_handle_regparm_attribute (tree *node, tree name, tree args,
1732 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1734 if (TREE_CODE (*node) != FUNCTION_TYPE
1735 && TREE_CODE (*node) != METHOD_TYPE
1736 && TREE_CODE (*node) != FIELD_DECL
1737 && TREE_CODE (*node) != TYPE_DECL)
1739 warning ("%qs attribute only applies to functions",
1740 IDENTIFIER_POINTER (name));
1741 *no_add_attrs = true;
1747 cst = TREE_VALUE (args);
1748 if (TREE_CODE (cst) != INTEGER_CST)
1750 warning ("%qs attribute requires an integer constant argument",
1751 IDENTIFIER_POINTER (name));
1752 *no_add_attrs = true;
1754 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
1756 warning ("argument to %qs attribute larger than %d",
1757 IDENTIFIER_POINTER (name), REGPARM_MAX);
1758 *no_add_attrs = true;
1761 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1763 error ("fastcall and regparm attributes are not compatible");
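/* An example of the attribute this handler validates (illustrative only):

     int __attribute__((regparm(2))) f (int a, int b);

   passes A and B in registers (conventionally %eax and %edx) instead of
   on the stack.  Arguments that are not integer constants, or that
   exceed REGPARM_MAX, are diagnosed above.  */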
1770 /* Return 0 if the attributes for two types are incompatible, 1 if they
1771 are compatible, and 2 if they are nearly compatible (which causes a
1772 warning to be generated). */
1775 ix86_comp_type_attributes (tree type1, tree type2)
1777 /* Check for mismatch of non-default calling convention. */
1778 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
1780 if (TREE_CODE (type1) != FUNCTION_TYPE)
1783 /* Check for mismatched fastcall types */
1784 if (!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
1785 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
1788 /* Check for mismatched return types (cdecl vs stdcall). */
1789 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
1790 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
1792 if (ix86_function_regparm (type1, NULL)
1793 != ix86_function_regparm (type2, NULL))
1798 /* Return the regparm value for a function with the indicated TYPE and DECL.
1799 DECL may be NULL when calling function indirectly
1800 or considering a libcall. */
1803 ix86_function_regparm (tree type, tree decl)
1806 int regparm = ix86_regparm;
1807 bool user_convention = false;
1811 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
1814 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
1815 user_convention = true;
1818 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
1821 user_convention = true;
1824 /* Use register calling convention for local functions when possible. */
1825 if (!TARGET_64BIT && !user_convention && decl
1826 && flag_unit_at_a_time && !profile_flag)
1828 struct cgraph_local_info *i = cgraph_local_info (decl);
1831 /* We can't use regparm(3) for nested functions as these use the
1832 static chain pointer in the third argument.  */
1833 if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
1843 /* Return true if EAX is live at the start of the function. Used by
1844 ix86_expand_prologue to determine if we need special help before
1845 calling allocate_stack_worker. */
1848 ix86_eax_live_at_start_p (void)
1850 /* Cheat. Don't bother working forward from ix86_function_regparm
1851 to the function type to whether an actual argument is located in
1852 eax. Instead just look at cfg info, which is still close enough
1853 to correct at this point. This gives false positives for broken
1854 functions that might use uninitialized data that happens to be
1855 allocated in eax, but who cares? */
1856 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->global_live_at_end, 0);
1859 /* Value is the number of bytes of arguments automatically
1860 popped when returning from a subroutine call.
1861 FUNDECL is the declaration node of the function (as a tree),
1862 FUNTYPE is the data type of the function (as a tree),
1863 or for a library call it is an identifier node for the subroutine name.
1864 SIZE is the number of bytes of arguments passed on the stack.
1866 On the 80386, the RTD insn may be used to pop them if the number
1867 of args is fixed, but if the number is variable then the caller
1868 must pop them all. RTD can't be used for library calls now
1869 because the library is compiled with the Unix compiler.
1870 Use of RTD is a selectable option, since it is incompatible with
1871 standard Unix calling sequences. If the option is not selected,
1872 the caller must always pop the args.
1874 The attribute stdcall is equivalent to RTD on a per module basis. */
1877 ix86_return_pops_args (tree fundecl, tree funtype, int size)
1879 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
1881 /* Cdecl functions override -mrtd, and never pop the stack. */
1882 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
1884 /* Stdcall and fastcall functions will pop the stack if not variable args.  */
1886 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
1887 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
1891 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
1892 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
1893 == void_type_node)))
1897 /* Lose any fake structure return argument if it is passed on the stack. */
1898 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
1900 && !KEEP_AGGREGATE_RETURN_POINTER)
1902 int nregs = ix86_function_regparm (funtype, fundecl);
1905 return GET_MODE_SIZE (Pmode);
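/* A worked example of the above (illustrative): for

     void __attribute__((stdcall)) f (int a, int b, int c);

   SIZE is 12, so the callee returns with `ret $12'.  A cdecl or varargs
   function yields 0 here and the caller pops the arguments itself.  */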
1911 /* Argument support functions. */
1913 /* Return true when register may be used to pass function parameters. */
1915 ix86_function_arg_regno_p (int regno)
1919 return (regno < REGPARM_MAX
1920 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
1921 if (SSE_REGNO_P (regno) && TARGET_SSE)
1923 /* RAX is used as hidden argument to va_arg functions. */
1926 for (i = 0; i < REGPARM_MAX; i++)
1927 if (regno == x86_64_int_parameter_registers[i])
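/* For reference, x86_64_int_parameter_registers holds the six integer
   argument registers of the x86-64 PS ABI: %rdi, %rsi, %rdx, %rcx, %r8
   and %r9; %rax additionally carries the hidden SSE-count argument for
   varargs calls, as checked above.  */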
1932 /* Return true if we do not know how to pass TYPE solely in registers.  */
1935 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
1937 if (must_pass_in_stack_var_size_or_pad (mode, type))
1940 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
1941 The layout_type routine is crafty and tries to trick us into passing
1942 currently unsupported vector types on the stack by using TImode. */
1943 return (!TARGET_64BIT && mode == TImode
1944 && type && TREE_CODE (type) != VECTOR_TYPE);
1947 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1948 for a call to a function whose data type is FNTYPE.
1949 For a library call, FNTYPE is 0. */
1952 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
1953 tree fntype, /* tree ptr for function decl */
1954 rtx libname, /* SYMBOL_REF of library name or 0 */
1957 static CUMULATIVE_ARGS zero_cum;
1958 tree param, next_param;
1960 if (TARGET_DEBUG_ARG)
1962 fprintf (stderr, "\ninit_cumulative_args (");
1964 fprintf (stderr, "fntype code = %s, ret code = %s",
1965 tree_code_name[(int) TREE_CODE (fntype)],
1966 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
1968 fprintf (stderr, "no fntype");
1971 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
1976 /* Set up the number of registers to use for passing arguments. */
1978 cum->nregs = ix86_function_regparm (fntype, fndecl);
1980 cum->nregs = ix86_regparm;
1982 cum->sse_nregs = SSE_REGPARM_MAX;
1984 cum->mmx_nregs = MMX_REGPARM_MAX;
1985 cum->warn_sse = true;
1986 cum->warn_mmx = true;
1987 cum->maybe_vaarg = false;
1989 /* Use ecx and edx registers if the function has the fastcall attribute.  */
1990 if (fntype && !TARGET_64BIT)
1992 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
1999 /* Determine if this function has variable arguments. This is
2000 indicated by the last argument being 'void_type_node' if there
2001 are no variable arguments. If there are variable arguments, then
2002 we won't pass anything in registers in 32-bit mode. */
2004 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2006 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2007 param != 0; param = next_param)
2009 next_param = TREE_CHAIN (param);
2010 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2021 cum->maybe_vaarg = true;
2025 if ((!fntype && !libname)
2026 || (fntype && !TYPE_ARG_TYPES (fntype)))
2027 cum->maybe_vaarg = true;
2029 if (TARGET_DEBUG_ARG)
2030 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2035 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
2036 But in the case of vector types, it is some vector mode.
2038 When we have only some of our vector isa extensions enabled, then there
2039 are some modes for which vector_mode_supported_p is false. For these
2040 modes, the generic vector support in gcc will choose some non-vector mode
2041 in order to implement the type. By computing the natural mode, we'll
2042 select the proper ABI location for the operand and not depend on whatever
2043 the middle-end decides to do with these vector types. */
2045 static enum machine_mode
2046 type_natural_mode (tree type)
2048 enum machine_mode mode = TYPE_MODE (type);
2050 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
2052 HOST_WIDE_INT size = int_size_in_bytes (type);
2053 if ((size == 8 || size == 16)
2054 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
2055 && TYPE_VECTOR_SUBPARTS (type) > 1)
2057 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2059 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
2060 mode = MIN_MODE_VECTOR_FLOAT;
2062 mode = MIN_MODE_VECTOR_INT;
2064 /* Get the mode which has this inner mode and number of units. */
2065 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
2066 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
2067 && GET_MODE_INNER (mode) == innermode)
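/* A small example (a sketch): given

     typedef int v4si __attribute__((vector_size (16)));

   the natural mode computed here is V4SImode even when SSE is disabled
   and the middle-end would otherwise fall back to a non-vector mode, so
   the ABI location of such an argument does not depend on which ISA
   extensions happen to be enabled.  */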
2077 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
2078 this may not agree with the mode that the type system has chosen for the
2079 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
2080 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
2083 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
2088 if (orig_mode != BLKmode)
2089 tmp = gen_rtx_REG (orig_mode, regno);
2092 tmp = gen_rtx_REG (mode, regno);
2093 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
2094 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
2100 /* x86-64 register passing implementation.  See the x86-64 ABI for details.
2101 The goal of this code is to classify each 8-byte chunk of the incoming
2102 argument by register class and assign registers accordingly.  */
2104 /* Return the union class of CLASS1 and CLASS2.
2105 See the x86-64 PS ABI for details. */
2107 static enum x86_64_reg_class
2108 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2110 /* Rule #1: If both classes are equal, this is the resulting class. */
2111 if (class1 == class2)
2114 /* Rule #2: If one of the classes is NO_CLASS, the result is the other class.  */
2116 if (class1 == X86_64_NO_CLASS)
2118 if (class2 == X86_64_NO_CLASS)
2121 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2122 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2123 return X86_64_MEMORY_CLASS;
2125 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2126 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2127 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2128 return X86_64_INTEGERSI_CLASS;
2129 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2130 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2131 return X86_64_INTEGER_CLASS;
2133 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class, MEMORY is used.  */
2135 if (class1 == X86_64_X87_CLASS
2136 || class1 == X86_64_X87UP_CLASS
2137 || class1 == X86_64_COMPLEX_X87_CLASS
2138 || class2 == X86_64_X87_CLASS
2139 || class2 == X86_64_X87UP_CLASS
2140 || class2 == X86_64_COMPLEX_X87_CLASS)
2141 return X86_64_MEMORY_CLASS;
2143 /* Rule #6: Otherwise class SSE is used. */
2144 return X86_64_SSE_CLASS;
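/* A few worked instances of the rules above (informal):

     merge (NO_CLASS,  SSE)     -> SSE        rule #2
     merge (INTEGERSI, SSESF)   -> INTEGERSI  rule #4, first test
     merge (INTEGER,   SSE)     -> INTEGER    rule #4
     merge (X87,       SSE)     -> MEMORY     rule #5
     merge (SSEDF,     SSESF)   -> SSE        rule #6  */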
2147 /* Classify the argument of type TYPE and mode MODE.
2148 CLASSES will be filled by the register class used to pass each word
2149 of the operand. The number of words is returned. In case the parameter
2150 should be passed in memory, 0 is returned. As a special case for zero
2151 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2153 BIT_OFFSET is used internally for handling records; it specifies the
2154 offset in bits, modulo 256, to avoid overflow cases.
2156 See the x86-64 PS ABI for details.
2160 classify_argument (enum machine_mode mode, tree type,
2161 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2163 HOST_WIDE_INT bytes =
2164 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2165 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2167 /* Variable sized entities are always passed/returned in memory. */
2171 if (mode != VOIDmode
2172 && targetm.calls.must_pass_in_stack (mode, type))
2175 if (type && AGGREGATE_TYPE_P (type))
2179 enum x86_64_reg_class subclasses[MAX_CLASSES];
2181 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2185 for (i = 0; i < words; i++)
2186 classes[i] = X86_64_NO_CLASS;
2188 /* Zero sized arrays or structures are NO_CLASS.  We return 0 to
2189 signal the memory class, so handle it as a special case.  */
2192 classes[0] = X86_64_NO_CLASS;
2196 /* Classify each field of the record and merge classes.  */
2197 if (TREE_CODE (type) == RECORD_TYPE)
2199 /* For classes, first merge in the fields of the subclasses.  */
2200 if (TYPE_BINFO (type))
2202 tree binfo, base_binfo;
2205 for (binfo = TYPE_BINFO (type), basenum = 0;
2206 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2209 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2210 tree type = BINFO_TYPE (base_binfo);
2212 num = classify_argument (TYPE_MODE (type),
2214 (offset + bit_offset) % 256);
2217 for (i = 0; i < num; i++)
2219 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2221 merge_classes (subclasses[i], classes[i + pos]);
2225 /* And now merge the fields of the structure.  */
2226 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2228 if (TREE_CODE (field) == FIELD_DECL)
2232 /* Bitfields are always classified as integer. Handle them
2233 early, since later code would consider them to be
2234 misaligned integers. */
2235 if (DECL_BIT_FIELD (field))
2237 for (i = int_bit_position (field) / 8 / 8;
2238 i < (int_bit_position (field)
2239 + tree_low_cst (DECL_SIZE (field), 0)
2242 merge_classes (X86_64_INTEGER_CLASS,
2247 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2248 TREE_TYPE (field), subclasses,
2249 (int_bit_position (field)
2250 + bit_offset) % 256);
2253 for (i = 0; i < num; i++)
2256 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2258 merge_classes (subclasses[i], classes[i + pos]);
2264 /* Arrays are handled as small records. */
2265 else if (TREE_CODE (type) == ARRAY_TYPE)
2268 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2269 TREE_TYPE (type), subclasses, bit_offset);
2273 /* The partial classes are now full classes. */
2274 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2275 subclasses[0] = X86_64_SSE_CLASS;
2276 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2277 subclasses[0] = X86_64_INTEGER_CLASS;
2279 for (i = 0; i < words; i++)
2280 classes[i] = subclasses[i % num];
2282 /* Unions are similar to RECORD_TYPE but the offset is always 0.  */
2283 else if (TREE_CODE (type) == UNION_TYPE
2284 || TREE_CODE (type) == QUAL_UNION_TYPE)
2286 /* For classes, first merge in the fields of the subclasses.  */
2287 if (TYPE_BINFO (type))
2289 tree binfo, base_binfo;
2292 for (binfo = TYPE_BINFO (type), basenum = 0;
2293 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2296 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2297 tree type = BINFO_TYPE (base_binfo);
2299 num = classify_argument (TYPE_MODE (type),
2301 (offset + (bit_offset % 64)) % 256);
2304 for (i = 0; i < num; i++)
2306 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2308 merge_classes (subclasses[i], classes[i + pos]);
2312 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2314 if (TREE_CODE (field) == FIELD_DECL)
2317 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2318 TREE_TYPE (field), subclasses,
2322 for (i = 0; i < num; i++)
2323 classes[i] = merge_classes (subclasses[i], classes[i]);
2330 /* Final merger cleanup. */
2331 for (i = 0; i < words; i++)
2333 /* If one class is MEMORY, everything should be passed in memory.  */
2335 if (classes[i] == X86_64_MEMORY_CLASS)
2338 /* The X86_64_SSEUP_CLASS should always be preceded by
2339 X86_64_SSE_CLASS.  */
2340 if (classes[i] == X86_64_SSEUP_CLASS
2341 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2342 classes[i] = X86_64_SSE_CLASS;
2344 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2345 if (classes[i] == X86_64_X87UP_CLASS
2346 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2347 classes[i] = X86_64_SSE_CLASS;
2352 /* Compute the alignment needed.  We align all types to natural boundaries,
2353 with the exception of XFmode, which is aligned to 64 bits.  */
2354 if (mode != VOIDmode && mode != BLKmode)
2356 int mode_alignment = GET_MODE_BITSIZE (mode);
2359 mode_alignment = 128;
2360 else if (mode == XCmode)
2361 mode_alignment = 256;
2362 if (COMPLEX_MODE_P (mode))
2363 mode_alignment /= 2;
2364 /* Misaligned fields are always returned in memory. */
2365 if (bit_offset % mode_alignment)
2369 /* For V1xx modes, just use the base mode.  */
2370 if (VECTOR_MODE_P (mode)
2371 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2372 mode = GET_MODE_INNER (mode);
2374 /* Classification of atomic types. */
2384 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2385 classes[0] = X86_64_INTEGERSI_CLASS;
2387 classes[0] = X86_64_INTEGER_CLASS;
2391 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2396 if (!(bit_offset % 64))
2397 classes[0] = X86_64_SSESF_CLASS;
2399 classes[0] = X86_64_SSE_CLASS;
2402 classes[0] = X86_64_SSEDF_CLASS;
2405 classes[0] = X86_64_X87_CLASS;
2406 classes[1] = X86_64_X87UP_CLASS;
2409 classes[0] = X86_64_SSE_CLASS;
2410 classes[1] = X86_64_SSEUP_CLASS;
2413 classes[0] = X86_64_SSE_CLASS;
2416 classes[0] = X86_64_SSEDF_CLASS;
2417 classes[1] = X86_64_SSEDF_CLASS;
2420 classes[0] = X86_64_COMPLEX_X87_CLASS;
2423 /* These modes are larger than 16 bytes.  */
2431 classes[0] = X86_64_SSE_CLASS;
2432 classes[1] = X86_64_SSEUP_CLASS;
2438 classes[0] = X86_64_SSE_CLASS;
2444 if (VECTOR_MODE_P (mode))
2448 if (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT)
2450 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2451 classes[0] = X86_64_INTEGERSI_CLASS;
2453 classes[0] = X86_64_INTEGER_CLASS;
2454 classes[1] = X86_64_INTEGER_CLASS;
2455 return 1 + (bytes > 8);
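/* A worked example of the classification (a sketch in psABI terms):

     struct s { double d; long l; };   // 16 bytes, two eightbytes

   eightbyte 0 (d) classifies as SSEDF and eightbyte 1 (l) as INTEGER,
   so the struct travels in one SSE and one integer register; anything
   larger than 16 bytes was already sent to memory above.  */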
2462 /* Examine the argument and set the number of registers required in each
2463 class.  Return 0 iff the parameter should be passed in memory.  */
2465 examine_argument (enum machine_mode mode, tree type, int in_return,
2466 int *int_nregs, int *sse_nregs)
2468 enum x86_64_reg_class class[MAX_CLASSES];
2469 int n = classify_argument (mode, type, class, 0);
2475 for (n--; n >= 0; n--)
2478 case X86_64_INTEGER_CLASS:
2479 case X86_64_INTEGERSI_CLASS:
2482 case X86_64_SSE_CLASS:
2483 case X86_64_SSESF_CLASS:
2484 case X86_64_SSEDF_CLASS:
2487 case X86_64_NO_CLASS:
2488 case X86_64_SSEUP_CLASS:
2490 case X86_64_X87_CLASS:
2491 case X86_64_X87UP_CLASS:
2495 case X86_64_COMPLEX_X87_CLASS:
2496 return in_return ? 2 : 0;
2497 case X86_64_MEMORY_CLASS:
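/* Continuing the example above: struct s { double d; long l; } yields
   *int_nregs == 1 and *sse_nregs == 1, while any MEMORY-classified
   argument makes this function return 0.  */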
2503 /* Construct a container for the argument used by the GCC interface.  See
2504 FUNCTION_ARG for the detailed description.  */
2507 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
2508 tree type, int in_return, int nintregs, int nsseregs,
2509 const int *intreg, int sse_regno)
2511 enum machine_mode tmpmode;
2513 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2514 enum x86_64_reg_class class[MAX_CLASSES];
2518 int needed_sseregs, needed_intregs;
2519 rtx exp[MAX_CLASSES];
2522 n = classify_argument (mode, type, class, 0);
2523 if (TARGET_DEBUG_ARG)
2526 fprintf (stderr, "Memory class\n");
2529 fprintf (stderr, "Classes:");
2530 for (i = 0; i < n; i++)
2532 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2534 fprintf (stderr, "\n");
2539 if (!examine_argument (mode, type, in_return, &needed_intregs,
2542 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2545 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2546 some less clueful developer tries to use floating-point anyway. */
2547 if (needed_sseregs && !TARGET_SSE)
2549 static bool issued_error;
2552 issued_error = true;
2554 error ("SSE register return with SSE disabled");
2556 error ("SSE register argument with SSE disabled");
2561 /* First construct the simple cases.  Avoid SCmode, since we want to use
2562 a single register to pass this type.  */
2563 if (n == 1 && mode != SCmode)
2566 case X86_64_INTEGER_CLASS:
2567 case X86_64_INTEGERSI_CLASS:
2568 return gen_rtx_REG (mode, intreg[0]);
2569 case X86_64_SSE_CLASS:
2570 case X86_64_SSESF_CLASS:
2571 case X86_64_SSEDF_CLASS:
2572 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
2573 case X86_64_X87_CLASS:
2574 case X86_64_COMPLEX_X87_CLASS:
2575 return gen_rtx_REG (mode, FIRST_STACK_REG);
2576 case X86_64_NO_CLASS:
2577 /* Zero sized array, struct or class. */
2582 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2584 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2586 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2587 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2588 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2589 && class[1] == X86_64_INTEGER_CLASS
2590 && (mode == CDImode || mode == TImode || mode == TFmode)
2591 && intreg[0] + 1 == intreg[1])
2592 return gen_rtx_REG (mode, intreg[0]);
2594 /* Otherwise figure out the entries of the PARALLEL. */
2595 for (i = 0; i < n; i++)
2599 case X86_64_NO_CLASS:
2601 case X86_64_INTEGER_CLASS:
2602 case X86_64_INTEGERSI_CLASS:
2603 /* Merge TImodes on aligned occasions here too. */
2604 if (i * 8 + 8 > bytes)
2605 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
2606 else if (class[i] == X86_64_INTEGERSI_CLASS)
2610 /* We've requested a size (e.g. 24 bits) for which we have no integer mode.  Use DImode.  */
2611 if (tmpmode == BLKmode)
2613 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2614 gen_rtx_REG (tmpmode, *intreg),
2618 case X86_64_SSESF_CLASS:
2619 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2620 gen_rtx_REG (SFmode,
2621 SSE_REGNO (sse_regno)),
2625 case X86_64_SSEDF_CLASS:
2626 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2627 gen_rtx_REG (DFmode,
2628 SSE_REGNO (sse_regno)),
2632 case X86_64_SSE_CLASS:
2633 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
2637 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2638 gen_rtx_REG (tmpmode,
2639 SSE_REGNO (sse_regno)),
2641 if (tmpmode == TImode)
2649 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
2650 for (i = 0; i < nexps; i++)
2651 XVECEXP (ret, 0, i) = exp [i];
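/* For the running example struct s { double d; long l; } passed as an
   argument, the PARALLEL built here looks roughly like (a sketch of
   the RTL):

     (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                (expr_list (reg:DI di)   (const_int 8))])

   one piece per eightbyte, each annotated with its byte offset.  */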
2655 /* Update the data in CUM to advance over an argument
2656 of mode MODE and data type TYPE.
2657 (TYPE is null for libcalls where that information may not be available.) */
2660 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2661 tree type, int named)
2664 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2665 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2668 mode = type_natural_mode (type);
2670 if (TARGET_DEBUG_ARG)
2671 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
2672 "mode=%s, named=%d)\n\n",
2673 words, cum->words, cum->nregs, cum->sse_nregs,
2674 GET_MODE_NAME (mode), named);
2678 int int_nregs, sse_nregs;
2679 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
2680 cum->words += words;
2681 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
2683 cum->nregs -= int_nregs;
2684 cum->sse_nregs -= sse_nregs;
2685 cum->regno += int_nregs;
2686 cum->sse_regno += sse_nregs;
2689 cum->words += words;
2707 cum->words += words;
2708 cum->nregs -= words;
2709 cum->regno += words;
2711 if (cum->nregs <= 0)
2725 if (!type || !AGGREGATE_TYPE_P (type))
2727 cum->sse_words += words;
2728 cum->sse_nregs -= 1;
2729 cum->sse_regno += 1;
2730 if (cum->sse_nregs <= 0)
2742 if (!type || !AGGREGATE_TYPE_P (type))
2744 cum->mmx_words += words;
2745 cum->mmx_nregs -= 1;
2746 cum->mmx_regno += 1;
2747 if (cum->mmx_nregs <= 0)
2758 /* Define where to put the arguments to a function.
2759 Value is zero to push the argument on the stack,
2760 or a hard register in which to store the argument.
2762 MODE is the argument's machine mode.
2763 TYPE is the data type of the argument (as a tree).
2764 This is null for libcalls where that information may not be available.
2766 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2767 the preceding args and about the function being called.
2768 NAMED is nonzero if this argument is a named parameter
2769 (otherwise it is an extra parameter matching an ellipsis). */
2772 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
2773 tree type, int named)
2775 enum machine_mode mode = orig_mode;
2778 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2779 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2780 static bool warnedsse, warnedmmx;
2782 /* To simplify the code below, represent vector types with a vector mode
2783 even if MMX/SSE are not active. */
2784 if (type && TREE_CODE (type) == VECTOR_TYPE)
2785 mode = type_natural_mode (type);
2787 /* Handle a hidden AL argument containing the number of registers for varargs
2788 x86-64 functions.  For the i386 ABI just return constm1_rtx to avoid any AL settings.  */
2790 if (mode == VOIDmode)
2793 return GEN_INT (cum->maybe_vaarg
2794 ? (cum->sse_nregs < 0
2802 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
2804 &x86_64_int_parameter_registers [cum->regno],
2809 /* For now, pass fp/complex values on the stack. */
2821 if (words <= cum->nregs)
2823 int regno = cum->regno;
2825 /* Fastcall allocates the first two DWORD (SImode) or
2826 smaller arguments to ECX and EDX. */
2829 if (mode == BLKmode || mode == DImode)
2832 /* ECX, not EAX, is the first allocated register.  */
2836 ret = gen_rtx_REG (mode, regno);
2846 if (!type || !AGGREGATE_TYPE_P (type))
2848 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
2851 warning ("SSE vector argument without SSE enabled "
2855 ret = gen_reg_or_parallel (mode, orig_mode,
2856 cum->sse_regno + FIRST_SSE_REG);
2863 if (!type || !AGGREGATE_TYPE_P (type))
2865 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
2868 warning ("MMX vector argument without MMX enabled "
2872 ret = gen_reg_or_parallel (mode, orig_mode,
2873 cum->mmx_regno + FIRST_MMX_REG);
2878 if (TARGET_DEBUG_ARG)
2881 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
2882 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
2885 print_simple_rtl (stderr, ret);
2887 fprintf (stderr, ", stack");
2889 fprintf (stderr, " )\n");
2895 /* A C expression that indicates when an argument must be passed by
2896 reference. If nonzero for an argument, a copy of that argument is
2897 made in memory and a pointer to the argument is passed instead of
2898 the argument itself. The pointer is passed in whatever way is
2899 appropriate for passing a pointer to that type. */
2902 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2903 enum machine_mode mode ATTRIBUTE_UNUSED,
2904 tree type, bool named ATTRIBUTE_UNUSED)
2909 if (type && int_size_in_bytes (type) == -1)
2911 if (TARGET_DEBUG_ARG)
2912 fprintf (stderr, "function_arg_pass_by_reference\n");
2919 /* Return true when TYPE should be 128-bit aligned for the 32-bit
2920 argument-passing ABI.  Only called if TARGET_SSE.  */
2922 contains_128bit_aligned_vector_p (tree type)
2924 enum machine_mode mode = TYPE_MODE (type);
2925 if (SSE_REG_MODE_P (mode)
2926 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
2928 if (TYPE_ALIGN (type) < 128)
2931 if (AGGREGATE_TYPE_P (type))
2933 /* Walk the aggregates recursively. */
2934 if (TREE_CODE (type) == RECORD_TYPE
2935 || TREE_CODE (type) == UNION_TYPE
2936 || TREE_CODE (type) == QUAL_UNION_TYPE)
2940 if (TYPE_BINFO (type))
2942 tree binfo, base_binfo;
2945 for (binfo = TYPE_BINFO (type), i = 0;
2946 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
2947 if (contains_128bit_aligned_vector_p (BINFO_TYPE (base_binfo)))
2950 /* And now merge the fields of the structure.  */
2951 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2953 if (TREE_CODE (field) == FIELD_DECL
2954 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
2958 /* Just for use if some languages pass arrays by value.  */
2959 else if (TREE_CODE (type) == ARRAY_TYPE)
2961 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
2970 /* Gives the alignment boundary, in bits, of an argument with the
2971 specified mode and type. */
2974 ix86_function_arg_boundary (enum machine_mode mode, tree type)
2978 align = TYPE_ALIGN (type);
2980 align = GET_MODE_ALIGNMENT (mode);
2981 if (align < PARM_BOUNDARY)
2982 align = PARM_BOUNDARY;
2985 /* The i386 ABI defines all arguments to be 4-byte aligned.  We have to
2986 make an exception for SSE modes since these require 128-bit alignment.
2989 The handling here differs from field_alignment. ICC aligns MMX
2990 arguments to 4 byte boundaries, while structure fields are aligned
2991 to 8 byte boundaries. */
2993 align = PARM_BOUNDARY;
2996 if (!SSE_REG_MODE_P (mode))
2997 align = PARM_BOUNDARY;
3001 if (!contains_128bit_aligned_vector_p (type))
3002 align = PARM_BOUNDARY;
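/* Examples of the resulting boundaries (illustrative): a plain int gets
   PARM_BOUNDARY (32 bits on ia32); an __m128 argument, or a struct
   containing a 128-bit aligned vector, gets 128-bit alignment; MMX
   vectors deliberately stay at 4-byte alignment to match ICC.  */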
3010 /* Return true if N is a possible register number for a function value.  */
3012 ix86_function_value_regno_p (int regno)
3016 return ((regno) == 0
3017 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
3018 || ((regno) == FIRST_SSE_REG && TARGET_SSE));
3020 return ((regno) == 0 || (regno) == FIRST_FLOAT_REG
3021 || ((regno) == FIRST_SSE_REG && TARGET_SSE)
3022 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387));
3025 /* Define how to find the value returned by a function.
3026 VALTYPE is the data type of the value (as a tree).
3027 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3028 otherwise, FUNC is 0. */
3030 ix86_function_value (tree valtype)
3032 enum machine_mode natmode = type_natural_mode (valtype);
3036 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
3037 1, REGPARM_MAX, SSE_REGPARM_MAX,
3038 x86_64_int_return_registers, 0);
3039 /* For zero sized structures, construct_container returns NULL, but we
3040 need to keep the rest of the compiler happy by returning a meaningful value.  */
3042 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
3046 return gen_rtx_REG (TYPE_MODE (valtype), ix86_value_regno (natmode));
3049 /* Return true iff type is returned in memory.  */
3051 ix86_return_in_memory (tree type)
3053 int needed_intregs, needed_sseregs, size;
3054 enum machine_mode mode = type_natural_mode (type);
3057 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
3059 if (mode == BLKmode)
3062 size = int_size_in_bytes (type);
3064 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
3067 if (VECTOR_MODE_P (mode) || mode == TImode)
3069 /* User-created vectors small enough to fit in EAX. */
3073 /* MMX/3dNow values are returned on the stack, since we've
3074 got to EMMS/FEMMS before returning. */
3078 /* SSE values are returned in XMM0, except when it doesn't exist. */
3080 return (TARGET_SSE ? 0 : 1);
3091 /* When returning SSE vector types, we have a choice of either
3092 (1) being abi incompatible with a -march switch, or
3093 (2) generating an error.
3094 Given no good solution, I think the safest thing is one warning.
3095 The user won't be able to use -Werror, but....
3097 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3098 called in response to actually generating a caller or callee that
3099 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3100 via aggregate_value_p for general type probing from tree-ssa. */
3103 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3107 if (!TARGET_SSE && type && !warned)
3109 /* Look at the return type of the function, not the function type. */
3110 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3113 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3116 warning ("SSE vector return without SSE enabled changes the ABI");
3123 /* Define how to find the value returned by a library function
3124 assuming the value has mode MODE. */
3126 ix86_libcall_value (enum machine_mode mode)
3137 return gen_rtx_REG (mode, FIRST_SSE_REG);
3140 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3144 return gen_rtx_REG (mode, 0);
3148 return gen_rtx_REG (mode, ix86_value_regno (mode));
3151 /* Given a mode, return the register to use for a return value. */
3154 ix86_value_regno (enum machine_mode mode)
3156 /* Floating point return values in %st(0). */
3157 if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_FLOAT_RETURNS_IN_80387)
3158 return FIRST_FLOAT_REG;
3159 /* 16-byte vector modes in %xmm0.  See ix86_return_in_memory for where
3160 we prevent this case when SSE is not available.  */
3161 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3162 return FIRST_SSE_REG;
3163 /* Everything else in %eax. */
3167 /* Create the va_list data type. */
3170 ix86_build_builtin_va_list (void)
3172 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3174 /* For i386 we use a plain pointer to the argument area.  */
3176 return build_pointer_type (char_type_node);
3178 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3179 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3181 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3182 unsigned_type_node);
3183 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3184 unsigned_type_node);
3185 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3187 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3190 DECL_FIELD_CONTEXT (f_gpr) = record;
3191 DECL_FIELD_CONTEXT (f_fpr) = record;
3192 DECL_FIELD_CONTEXT (f_ovf) = record;
3193 DECL_FIELD_CONTEXT (f_sav) = record;
3195 TREE_CHAIN (record) = type_decl;
3196 TYPE_NAME (record) = type_decl;
3197 TYPE_FIELDS (record) = f_gpr;
3198 TREE_CHAIN (f_gpr) = f_fpr;
3199 TREE_CHAIN (f_fpr) = f_ovf;
3200 TREE_CHAIN (f_ovf) = f_sav;
3202 layout_type (record);
3204 /* The correct type is an array type of one element. */
3205 return build_array_type (record, build_index_type (size_zero_node));
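/* The record built above corresponds to the user-visible x86-64 va_list
   layout, shown here as C for illustration:

     typedef struct {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag[1];
*/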
3208 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3211 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3212 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3215 CUMULATIVE_ARGS next_cum;
3216 rtx save_area = NULL_RTX, mem;
3229 /* Indicate that we need to allocate stack space for the varargs save area.  */
3230 ix86_save_varrargs_registers = 1;
3232 cfun->stack_alignment_needed = 128;
3234 fntype = TREE_TYPE (current_function_decl);
3235 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3236 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3237 != void_type_node));
3239 /* For varargs, we do not want to skip the dummy va_dcl argument.
3240 For stdargs, we do want to skip the last named argument. */
3243 function_arg_advance (&next_cum, mode, type, 1);
3246 save_area = frame_pointer_rtx;
3248 set = get_varargs_alias_set ();
3250 for (i = next_cum.regno; i < ix86_regparm; i++)
3252 mem = gen_rtx_MEM (Pmode,
3253 plus_constant (save_area, i * UNITS_PER_WORD));
3254 set_mem_alias_set (mem, set);
3255 emit_move_insn (mem, gen_rtx_REG (Pmode,
3256 x86_64_int_parameter_registers[i]));
3259 if (next_cum.sse_nregs)
3261 /* Now emit code to save SSE registers.  The AX parameter contains the
3262 number of SSE parameter registers used to call this function.  We use the
3263 sse_prologue_save insn template, which produces a computed jump across
3264 the SSE saves.  We need some preparation work to get this working.  */
3266 label = gen_label_rtx ();
3267 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3269 /* Compute the address to jump to:
3270 label - 5*eax + nnamed_sse_arguments*5  */
3271 tmp_reg = gen_reg_rtx (Pmode);
3272 nsse_reg = gen_reg_rtx (Pmode);
3273 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3274 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3275 gen_rtx_MULT (Pmode, nsse_reg,
3277 if (next_cum.sse_regno)
3280 gen_rtx_CONST (DImode,
3281 gen_rtx_PLUS (DImode,
3283 GEN_INT (next_cum.sse_regno * 4))));
3285 emit_move_insn (nsse_reg, label_ref);
3286 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3288 /* Compute the address of the memory block we save into.  We always use a
3289 pointer pointing 127 bytes after the first byte to store; this is needed
3290 to keep the instruction size limited to 4 bytes.  */
3291 tmp_reg = gen_reg_rtx (Pmode);
3292 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3293 plus_constant (save_area,
3294 8 * REGPARM_MAX + 127)));
3295 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3296 set_mem_alias_set (mem, set);
3297 set_mem_align (mem, BITS_PER_WORD);
3299 /* And finally do the dirty job! */
3300 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3301 GEN_INT (next_cum.sse_regno), label));
3306 /* Implement va_start. */
3309 ix86_va_start (tree valist, rtx nextarg)
3311 HOST_WIDE_INT words, n_gpr, n_fpr;
3312 tree f_gpr, f_fpr, f_ovf, f_sav;
3313 tree gpr, fpr, ovf, sav, t;
3315 /* Only the 64-bit target needs something special.  */
3318 std_expand_builtin_va_start (valist, nextarg);
3322 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3323 f_fpr = TREE_CHAIN (f_gpr);
3324 f_ovf = TREE_CHAIN (f_fpr);
3325 f_sav = TREE_CHAIN (f_ovf);
3327 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3328 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3329 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3330 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3331 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3333 /* Count the number of GP and FP argument registers used.  */
3334 words = current_function_args_info.words;
3335 n_gpr = current_function_args_info.regno;
3336 n_fpr = current_function_args_info.sse_regno;
3338 if (TARGET_DEBUG_ARG)
3339 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3340 (int) words, (int) n_gpr, (int) n_fpr);
3342 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3343 build_int_cst (NULL_TREE, n_gpr * 8));
3344 TREE_SIDE_EFFECTS (t) = 1;
3345 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3347 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3348 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3349 TREE_SIDE_EFFECTS (t) = 1;
3350 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3352 /* Find the overflow area. */
3353 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3355 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3356 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3357 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3358 TREE_SIDE_EFFECTS (t) = 1;
3359 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3361 /* Find the register save area.
3362 The function prologue saves it right above the stack frame.  */
3363 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3364 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3365 TREE_SIDE_EFFECTS (t) = 1;
3366 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3369 /* Implement va_arg. */
3372 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3374 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3375 tree f_gpr, f_fpr, f_ovf, f_sav;
3376 tree gpr, fpr, ovf, sav, t;
3378 tree lab_false, lab_over = NULL_TREE;
3383 enum machine_mode nat_mode;
3385 /* Only the 64-bit target needs something special.  */
3387 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3389 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3390 f_fpr = TREE_CHAIN (f_gpr);
3391 f_ovf = TREE_CHAIN (f_fpr);
3392 f_sav = TREE_CHAIN (f_ovf);
3394 valist = build_va_arg_indirect_ref (valist);
3395 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3396 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3397 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3398 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3400 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3402 type = build_pointer_type (type);
3403 size = int_size_in_bytes (type);
3404 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3406 nat_mode = type_natural_mode (type);
3407 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
3408 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3410 /* Pull the value out of the saved registers. */
3412 addr = create_tmp_var (ptr_type_node, "addr");
3413 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3417 int needed_intregs, needed_sseregs;
3419 tree int_addr, sse_addr;
3421 lab_false = create_artificial_label ();
3422 lab_over = create_artificial_label ();
3424 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
3426 need_temp = (!REG_P (container)
3427 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3428 || TYPE_ALIGN (type) > 128));
3430 /* If we are passing a structure, verify that it is a consecutive block
3431 on the register save area.  If not, we need to do moves.  */
3432 if (!need_temp && !REG_P (container))
3434 /* Verify that all registers are strictly consecutive.  */
3435 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3439 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3441 rtx slot = XVECEXP (container, 0, i);
3442 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3443 || INTVAL (XEXP (slot, 1)) != i * 16)
3451 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3453 rtx slot = XVECEXP (container, 0, i);
3454 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3455 || INTVAL (XEXP (slot, 1)) != i * 8)
3467 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3468 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3469 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3470 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3473 /* First ensure that we fit completely in registers. */
3476 t = build_int_cst (TREE_TYPE (gpr),
3477 (REGPARM_MAX - needed_intregs + 1) * 8);
3478 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3479 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3480 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3481 gimplify_and_add (t, pre_p);
3485 t = build_int_cst (TREE_TYPE (fpr),
3486 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3488 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3489 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3490 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3491 gimplify_and_add (t, pre_p);
3494 /* Compute index to start of area used for integer regs. */
3497 /* int_addr = gpr + sav; */
3498 t = fold_convert (ptr_type_node, gpr);
3499 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3500 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3501 gimplify_and_add (t, pre_p);
3505 /* sse_addr = fpr + sav; */
3506 t = fold_convert (ptr_type_node, fpr);
3507 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3508 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3509 gimplify_and_add (t, pre_p);
3514 tree temp = create_tmp_var (type, "va_arg_tmp");
3517 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
3518 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
3519 gimplify_and_add (t, pre_p);
3521 for (i = 0; i < XVECLEN (container, 0); i++)
3523 rtx slot = XVECEXP (container, 0, i);
3524 rtx reg = XEXP (slot, 0);
3525 enum machine_mode mode = GET_MODE (reg);
3526 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
3527 tree addr_type = build_pointer_type (piece_type);
3530 tree dest_addr, dest;
3532 if (SSE_REGNO_P (REGNO (reg)))
3534 src_addr = sse_addr;
3535 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
3539 src_addr = int_addr;
3540 src_offset = REGNO (reg) * 8;
3542 src_addr = fold_convert (addr_type, src_addr);
3543 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
3544 size_int (src_offset)));
3545 src = build_va_arg_indirect_ref (src_addr);
3547 dest_addr = fold_convert (addr_type, addr);
3548 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
3549 size_int (INTVAL (XEXP (slot, 1)))));
3550 dest = build_va_arg_indirect_ref (dest_addr);
3552 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
3553 gimplify_and_add (t, pre_p);
3559 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
3560 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
3561 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
3562 gimplify_and_add (t, pre_p);
3566 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
3567 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
3568 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
3569 gimplify_and_add (t, pre_p);
3572 t = build1 (GOTO_EXPR, void_type_node, lab_over);
3573 gimplify_and_add (t, pre_p);
3575 t = build1 (LABEL_EXPR, void_type_node, lab_false);
3576 append_to_statement_list (t, pre_p);
3579 /* ... otherwise out of the overflow area. */
3581 /* Care for on-stack alignment if needed. */
3582 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
3586 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
3587 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
3588 build_int_cst (TREE_TYPE (ovf), align - 1));
3589 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3590 build_int_cst (TREE_TYPE (t), -align));
3592 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
3594 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
3595 gimplify_and_add (t2, pre_p);
3597 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
3598 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
3599 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3600 gimplify_and_add (t, pre_p);
3604 t = build1 (LABEL_EXPR, void_type_node, lab_over);
3605 append_to_statement_list (t, pre_p);
3608 ptrtype = build_pointer_type (type);
3609 addr = fold_convert (ptrtype, addr);
3612 addr = build_va_arg_indirect_ref (addr);
3613 return build_va_arg_indirect_ref (addr);
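/* The GIMPLE emitted above amounts to this control flow (a pseudo-C
   sketch of the psABI va_arg algorithm, assuming the usual
   REGPARM_MAX == 6 and SSE_REGPARM_MAX == 8):

     if (gp_offset <= 48 - 8 * needed_intregs
         && fp_offset <= 176 - 16 * needed_sseregs)
       {
         addr = reg_save_area + offset;      // registers, maybe via a temp
         gp_offset += 8 * needed_intregs;
         fp_offset += 16 * needed_sseregs;
       }
     else
       {
         addr = align (overflow_arg_area);   // ... the overflow area
         overflow_arg_area += rsize * UNITS_PER_WORD;
       }
*/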
3616 /* Return nonzero if OPNUM's MEM should be matched
3617 in movabs* patterns. */
3620 ix86_check_movabs (rtx insn, int opnum)
3624 set = PATTERN (insn);
3625 if (GET_CODE (set) == PARALLEL)
3626 set = XVECEXP (set, 0, 0);
3627 if (GET_CODE (set) != SET)
3629 mem = XEXP (set, opnum);
3630 while (GET_CODE (mem) == SUBREG)
3631 mem = SUBREG_REG (mem);
3632 if (GET_CODE (mem) != MEM)
3634 return (volatile_ok || !MEM_VOLATILE_P (mem));
3637 /* Initialize the table of extra 80387 mathematical constants. */
3640 init_ext_80387_constants (void)
3642 static const char * cst[5] =
3644 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
3645 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
3646 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
3647 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
3648 "3.1415926535897932385128089594061862044", /* 4: fldpi */
3652 for (i = 0; i < 5; i++)
3654 real_from_string (&ext_80387_constants_table[i], cst[i]);
3655 /* Ensure each constant is rounded to XFmode precision. */
3656 real_convert (&ext_80387_constants_table[i],
3657 XFmode, &ext_80387_constants_table[i]);
3660 ext_80387_constants_init = 1;
3663 /* Return true if the constant is something that can be loaded with
3664 a special instruction. */
3667 standard_80387_constant_p (rtx x)
3669 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
3672 if (x == CONST0_RTX (GET_MODE (x)))
3674 if (x == CONST1_RTX (GET_MODE (x)))
3677 /* For XFmode constants, try to find a special 80387 instruction when
3678 optimizing for size or on those CPUs that benefit from them. */
3679 if (GET_MODE (x) == XFmode
3680 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
3685 if (! ext_80387_constants_init)
3686 init_ext_80387_constants ();
3688 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3689 for (i = 0; i < 5; i++)
3690 if (real_identical (&r, &ext_80387_constants_table[i]))
3697 /* Return the opcode of the special instruction to be used to load the constant X.  */
3701 standard_80387_constant_opcode (rtx x)
3703 switch (standard_80387_constant_p (x))
3723 /* Return the CONST_DOUBLE representing the 80387 constant that is
3724 loaded by the specified special instruction. The argument IDX
3725 matches the return value from standard_80387_constant_p. */
3728 standard_80387_constant_rtx (int idx)
3732 if (! ext_80387_constants_init)
3733 init_ext_80387_constants ();
3749 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
3753 /* Return 1 if X is an FP constant we can load into an SSE register without using memory.  */
3756 standard_sse_constant_p (rtx x)
3758 if (x == const0_rtx)
3760 return (x == CONST0_RTX (GET_MODE (x)));
3763 /* Returns 1 if OP contains a symbol reference */
3766 symbolic_reference_mentioned_p (rtx op)
3771 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3774 fmt = GET_RTX_FORMAT (GET_CODE (op));
3775 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3781 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3782 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3786 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3793 /* Return 1 if it is appropriate to emit `ret' instructions in the
3794 body of a function. Do this only if the epilogue is simple, needing a
3795 couple of insns. Prior to reloading, we can't tell how many registers
3796 must be saved, so return 0 then. Return 0 if there is no frame
3797 marker to de-allocate. */
3800 ix86_can_use_return_insn_p (void)
3802 struct ix86_frame frame;
3804 if (! reload_completed || frame_pointer_needed)
3807 /* Don't allow more than 32k pop, since that's all we can do
3808 with one instruction.  */
3809 if (current_function_pops_args
3810 && current_function_args_size >= 32768)
3813 ix86_compute_frame_layout (&frame);
3814 return frame.to_allocate == 0 && frame.nregs == 0;
3817 /* Value should be nonzero if functions must have frame pointers.
3818 Zero means the frame pointer need not be set up (and parms may
3819 be accessed via the stack pointer) in functions that seem suitable. */
3822 ix86_frame_pointer_required (void)
3824 /* If we accessed previous frames, then the generated code expects
3825 to be able to access the saved ebp value in our frame. */
3826 if (cfun->machine->accesses_prev_frame)
3829 /* Several x86 OSes need a frame pointer for other reasons,
3830 usually pertaining to setjmp.  */
3831 if (SUBTARGET_FRAME_POINTER_REQUIRED)
3834 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
3835 the frame pointer by default. Turn it back on now if we've not
3836 got a leaf function. */
3837 if (TARGET_OMIT_LEAF_FRAME_POINTER
3838 && (!current_function_is_leaf))
3841 if (current_function_profile)
3847 /* Record that the current function accesses previous call frames. */
3850 ix86_setup_frame_addresses (void)
3852 cfun->machine->accesses_prev_frame = 1;
3855 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
3856 # define USE_HIDDEN_LINKONCE 1
3858 # define USE_HIDDEN_LINKONCE 0
3861 static int pic_labels_used;
3863 /* Fills in the label name that should be used for a pc thunk for
3864 the given register. */
3867 get_pc_thunk_name (char name[32], unsigned int regno)
3869 if (USE_HIDDEN_LINKONCE)
3870 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
3872 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
3876 /* This function generates code for -fpic that loads %ebx with
3877 the return address of the caller and then returns. */
3880 ix86_file_end (void)
3885 for (regno = 0; regno < 8; ++regno)
3889 if (! ((pic_labels_used >> regno) & 1))
3892 get_pc_thunk_name (name, regno);
3894 if (USE_HIDDEN_LINKONCE)
3898 decl = build_decl (FUNCTION_DECL, get_identifier (name),
3900 TREE_PUBLIC (decl) = 1;
3901 TREE_STATIC (decl) = 1;
3902 DECL_ONE_ONLY (decl) = 1;
3904 (*targetm.asm_out.unique_section) (decl, 0);
3905 named_section (decl, NULL, 0);
3907 (*targetm.asm_out.globalize_label) (asm_out_file, name);
3908 fputs ("\t.hidden\t", asm_out_file);
3909 assemble_name (asm_out_file, name);
3910 fputc ('\n', asm_out_file);
3911 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
3916 ASM_OUTPUT_LABEL (asm_out_file, name);
3919 xops[0] = gen_rtx_REG (SImode, regno);
3920 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
3921 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
3922 output_asm_insn ("ret", xops);
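/* For illustration (a sketch, not verbatim compiler output): the thunk
   emitted above for %ebx looks like

	__i686.get_pc_thunk.bx:
		movl	(%esp), %ebx
		ret

   i.e. it copies its own return address -- the address of the
   instruction following the call -- into the register.  */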
3925 if (NEED_INDICATE_EXEC_STACK)
3926 file_end_indicate_exec_stack ();
3929 /* Emit code for the SET_GOT patterns. */
3932 output_set_got (rtx dest)
3937 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
3939 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
3941 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
3944 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
3946 output_asm_insn ("call\t%a2", xops);
3949 /* Output the "canonical" label name ("Lxx$pb") here too. This
3950 is what will be referred to by the Mach-O PIC subsystem. */
3951 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
3953 (*targetm.asm_out.internal_label) (asm_out_file, "L",
3954 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
3957 output_asm_insn ("pop{l}\t%0", xops);
3962 get_pc_thunk_name (name, REGNO (dest));
3963 pic_labels_used |= 1 << REGNO (dest);
3965 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3966 xops[2] = gen_rtx_MEM (QImode, xops[2]);
3967 output_asm_insn ("call\t%X2", xops);
3970 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
3971 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
3972 else if (!TARGET_MACHO)
3973 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %a1+(.-%a2)}", xops);
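/* As a sketch of the two code paths above, for a destination of %ebx
   (AT&T syntax assumed, labels illustrative only):

	call	.L1			# !TARGET_DEEP_BRANCH_PREDICTION
   .L1:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L1], %ebx

   versus

	call	__i686.get_pc_thunk.bx	# TARGET_DEEP_BRANCH_PREDICTION
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   The thunk variant keeps the processor's return-address predictor
   balanced, which is why it is preferred on deep branch prediction
   targets.  */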
3978 /* Generate a "push" pattern for input ARG. */
3983 return gen_rtx_SET (VOIDmode,
3984 gen_rtx_MEM (Pmode,
3985 gen_rtx_PRE_DEC (Pmode,
3986 stack_pointer_rtx)),
3987 arg);
3990 /* Return >= 0 if there is an unused call-clobbered register available
3991 for the entire function. */
3994 ix86_select_alt_pic_regnum (void)
3996 if (current_function_is_leaf && !current_function_profile)
3999 for (i = 2; i >= 0; --i)
4000 if (!regs_ever_live[i])
4004 return INVALID_REGNUM;
4007 /* Return 1 if we need to save REGNO. */
4009 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4011 if (pic_offset_table_rtx
4012 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4013 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4014 || current_function_profile
4015 || current_function_calls_eh_return
4016 || current_function_uses_const_pool))
4018 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4023 if (current_function_calls_eh_return && maybe_eh_return)
4028 unsigned test = EH_RETURN_DATA_REGNO (i);
4029 if (test == INVALID_REGNUM)
4036 return (regs_ever_live[regno]
4037 && !call_used_regs[regno]
4038 && !fixed_regs[regno]
4039 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4042 /* Return number of registers to be saved on the stack. */
4045 ix86_nsaved_regs (void)
4050 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4051 if (ix86_save_reg (regno, true))
4056 /* Return the offset between two registers, one to be eliminated, and the other
4057 its replacement, at the start of a routine. */
4060 ix86_initial_elimination_offset (int from, int to)
4062 struct ix86_frame frame;
4063 ix86_compute_frame_layout (&frame);
4065 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4066 return frame.hard_frame_pointer_offset;
4067 else if (from == FRAME_POINTER_REGNUM
4068 && to == HARD_FRAME_POINTER_REGNUM)
4069 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4072 if (to != STACK_POINTER_REGNUM)
4074 else if (from == ARG_POINTER_REGNUM)
4075 return frame.stack_pointer_offset;
4076 else if (from != FRAME_POINTER_REGNUM)
4079 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4083 /* Fill the structure ix86_frame describing the frame of the currently computed function. */
4086 ix86_compute_frame_layout (struct ix86_frame *frame)
4088 HOST_WIDE_INT total_size;
4089 unsigned int stack_alignment_needed;
4090 HOST_WIDE_INT offset;
4091 unsigned int preferred_alignment;
4092 HOST_WIDE_INT size = get_frame_size ();
4094 frame->nregs = ix86_nsaved_regs ();
4097 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4098 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4100 /* During reload iterations the number of registers saved can change.
4101 Recompute the value as needed. Do not recompute when the number of registers
4102 didn't change, as reload makes multiple calls to the function and does not
4103 expect the decision to change within a single iteration. */
4105 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4107 int count = frame->nregs;
4109 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4110 /* The fast prologue uses move instead of push to save registers. This
4111 is significantly longer, but also executes faster as modern hardware
4112 can execute the moves in parallel, but can't do that for push/pop.
4114 Be careful about choosing which prologue to emit: when the function takes
4115 many instructions to execute, we may as well use the slow version, likewise
4116 when the function is known to be outside a hot spot (this is known with
4117 feedback only). Weight the size of the function by the number of registers
4118 to save, as it is cheap to use one or two push instructions but very
4119 slow to use many of them. */
4121 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4122 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4123 || (flag_branch_probabilities
4124 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4125 cfun->machine->use_fast_prologue_epilogue = false;
4127 cfun->machine->use_fast_prologue_epilogue
4128 = !expensive_function_p (count);
4130 if (TARGET_PROLOGUE_USING_MOVE
4131 && cfun->machine->use_fast_prologue_epilogue)
4132 frame->save_regs_using_mov = true;
4134 frame->save_regs_using_mov = false;
4137 /* Skip return address and saved base pointer. */
4138 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4140 frame->hard_frame_pointer_offset = offset;
4142 /* Do some sanity checking of stack_alignment_needed and
4143 preferred_alignment, since the i386 port is the only one using these
4144 features, and they may break easily. */
4146 if (size && !stack_alignment_needed)
4148 if (preferred_alignment < STACK_BOUNDARY / BITS_PER_UNIT)
4150 if (preferred_alignment > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4152 if (stack_alignment_needed > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4155 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4156 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4158 /* Register save area */
4159 offset += frame->nregs * UNITS_PER_WORD;
4162 if (ix86_save_varrargs_registers)
4164 offset += X86_64_VARARGS_SIZE;
4165 frame->va_arg_size = X86_64_VARARGS_SIZE;
4168 frame->va_arg_size = 0;
4170 /* Align start of frame for local function. */
4171 frame->padding1 = ((offset + stack_alignment_needed - 1)
4172 & -stack_alignment_needed) - offset;
4174 offset += frame->padding1;
4176 /* Frame pointer points here. */
4177 frame->frame_pointer_offset = offset;
4181 /* Add outgoing arguments area. Can be skipped if we eliminated
4182 all the function calls as dead code.
4183 Skipping is however impossible when the function calls alloca. The alloca
4184 expander assumes that the last current_function_outgoing_args_size
4185 bytes of the stack frame are unused. */
4186 if (ACCUMULATE_OUTGOING_ARGS
4187 && (!current_function_is_leaf || current_function_calls_alloca))
4189 offset += current_function_outgoing_args_size;
4190 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4193 frame->outgoing_arguments_size = 0;
4195 /* Align stack boundary. Only needed if we're calling another function or using alloca. */
4197 if (!current_function_is_leaf || current_function_calls_alloca)
4198 frame->padding2 = ((offset + preferred_alignment - 1)
4199 & -preferred_alignment) - offset;
4201 frame->padding2 = 0;
4203 offset += frame->padding2;
4205 /* We've reached the end of the stack frame. */
4206 frame->stack_pointer_offset = offset;
4208 /* Size prologue needs to allocate. */
4209 frame->to_allocate =
4210 (size + frame->padding1 + frame->padding2
4211 + frame->outgoing_arguments_size + frame->va_arg_size);
4213 if ((!frame->to_allocate && frame->nregs <= 1)
4214 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4215 frame->save_regs_using_mov = false;
4217 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4218 && current_function_is_leaf)
4220 frame->red_zone_size = frame->to_allocate;
4221 if (frame->save_regs_using_mov)
4222 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4223 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4224 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4227 frame->red_zone_size = 0;
4228 frame->to_allocate -= frame->red_zone_size;
4229 frame->stack_pointer_offset -= frame->red_zone_size;
4231 fprintf (stderr, "nregs: %i\n", frame->nregs);
4232 fprintf (stderr, "size: %i\n", size);
4233 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4234 fprintf (stderr, "padding1: %i\n", frame->padding1);
4235 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4236 fprintf (stderr, "padding2: %i\n", frame->padding2);
4237 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4238 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4239 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4240 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4241 frame->hard_frame_pointer_offset);
4242 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
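/* For reference, a sketch of the frame layout computed above, from
   higher to lower addresses (labels on the right are the offsets filled
   in by this function):

	return address				<- incoming stack pointer
	saved %ebp (if frame_pointer_needed)	<- hard_frame_pointer_offset
	register save area (nregs words)
	va_arg registers (64-bit varargs only)
	padding1				<- frame_pointer_offset
	local variables (get_frame_size ())
	outgoing arguments
	padding2				<- stack_pointer_offset  */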
4246 /* Emit code to save registers in the prologue. */
4249 ix86_emit_save_regs (void)
4254 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4255 if (ix86_save_reg (regno, true))
4257 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4258 RTX_FRAME_RELATED_P (insn) = 1;
4262 /* Emit code to save registers using MOV insns. First register
4263 is stored at POINTER + OFFSET. */
4265 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4270 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4271 if (ix86_save_reg (regno, true))
4273 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4275 gen_rtx_REG (Pmode, regno));
4276 RTX_FRAME_RELATED_P (insn) = 1;
4277 offset += UNITS_PER_WORD;
4281 /* Expand prologue or epilogue stack adjustment.
4282 The pattern exists to put a dependency on all ebp-based memory accesses.
4283 STYLE should be negative if instructions should be marked as frame related,
4284 zero if the %r11 register is live and cannot be freely used, and positive otherwise. */
4288 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4293 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4294 else if (x86_64_immediate_operand (offset, DImode))
4295 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4299 /* r11 is used by indirect sibcall return as well, set before the
4300 epilogue and used after the epilogue. ATM indirect sibcall
4301 shouldn't be used together with huge frame sizes in one
4302 function because of the frame_size check in sibcall.c. */
4305 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4306 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4308 RTX_FRAME_RELATED_P (insn) = 1;
4309 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4313 RTX_FRAME_RELATED_P (insn) = 1;
4316 /* Expand the prologue into a bunch of separate insns. */
4319 ix86_expand_prologue (void)
4323 struct ix86_frame frame;
4324 HOST_WIDE_INT allocate;
4326 ix86_compute_frame_layout (&frame);
4328 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4329 slower on all targets. Also sdb doesn't like it. */
4331 if (frame_pointer_needed)
4333 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4334 RTX_FRAME_RELATED_P (insn) = 1;
4336 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4337 RTX_FRAME_RELATED_P (insn) = 1;
4340 allocate = frame.to_allocate;
4342 if (!frame.save_regs_using_mov)
4343 ix86_emit_save_regs ();
4345 allocate += frame.nregs * UNITS_PER_WORD;
4347 /* When using the red zone we may start saving registers before allocating
4348 the stack frame, saving one cycle of the prologue. */
4349 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4350 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4351 : stack_pointer_rtx,
4352 -frame.nregs * UNITS_PER_WORD);
4356 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4357 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4358 GEN_INT (-allocate), -1);
4361 /* Only valid for Win32. */
4362 rtx eax = gen_rtx_REG (SImode, 0);
4363 bool eax_live = ix86_eax_live_at_start_p ();
4371 emit_insn (gen_push (eax));
4375 emit_move_insn (eax, GEN_INT (allocate));
4377 insn = emit_insn (gen_allocate_stack_worker (eax));
4378 RTX_FRAME_RELATED_P (insn) = 1;
4379 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
4380 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
4381 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4382 t, REG_NOTES (insn));
4386 if (frame_pointer_needed)
4387 t = plus_constant (hard_frame_pointer_rtx,
4390 - frame.nregs * UNITS_PER_WORD);
4392 t = plus_constant (stack_pointer_rtx, allocate);
4393 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4397 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4399 if (!frame_pointer_needed || !frame.to_allocate)
4400 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4402 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4403 -frame.nregs * UNITS_PER_WORD);
4406 pic_reg_used = false;
4407 if (pic_offset_table_rtx
4408 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4409 || current_function_profile))
4411 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4413 if (alt_pic_reg_used != INVALID_REGNUM)
4414 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4416 pic_reg_used = true;
4421 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
4423 /* Even with accurate pre-reload life analysis, we can wind up
4424 deleting all references to the pic register after reload.
4425 Consider if cross-jumping unifies two sides of a branch
4426 controlled by a comparison vs the only read from a global.
4427 In which case, allow the set_got to be deleted, though we're
4428 too late to do anything about the ebx save in the prologue. */
4429 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
4432 /* Prevent function calls from being scheduled before the call to mcount.
4433 In the pic_reg_used case, make sure that the got load isn't deleted. */
4434 if (current_function_profile)
4435 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
4438 /* Emit code to restore saved registers using MOV insns. First register
4439 is restored from POINTER + OFFSET. */
4441 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
4442 int maybe_eh_return)
4445 rtx base_address = gen_rtx_MEM (Pmode, pointer);
4447 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4448 if (ix86_save_reg (regno, maybe_eh_return))
4450 /* Ensure that adjust_address won't be forced to produce a pointer
4451 out of the range allowed by the x86-64 instruction set. */
4452 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
4456 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4457 emit_move_insn (r11, GEN_INT (offset));
4458 emit_insn (gen_adddi3 (r11, r11, pointer));
4459 base_address = gen_rtx_MEM (Pmode, r11);
4462 emit_move_insn (gen_rtx_REG (Pmode, regno),
4463 adjust_address (base_address, Pmode, offset));
4464 offset += UNITS_PER_WORD;
4468 /* Restore function stack, frame, and registers. */
4471 ix86_expand_epilogue (int style)
4474 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
4475 struct ix86_frame frame;
4476 HOST_WIDE_INT offset;
4478 ix86_compute_frame_layout (&frame);
4480 /* Calculate start of saved registers relative to ebp. Special care
4481 must be taken for the normal return case of a function using
4482 eh_return: the eax and edx registers are marked as saved, but not
4483 restored along this path. */
4484 offset = frame.nregs;
4485 if (current_function_calls_eh_return && style != 2)
4487 offset *= -UNITS_PER_WORD;
4489 /* If we're only restoring one register and sp is not valid then
4490 use a move instruction to restore the register, since it's
4491 less work than reloading sp and popping the register.
4493 The default code results in a stack adjustment using an add/lea instruction,
4494 while this code results in a LEAVE instruction (or discrete equivalent),
4495 so it is profitable in some other cases as well, especially when there
4496 are no registers to restore. We also use this code when TARGET_USE_LEAVE
4497 and there is exactly one register to pop. This heuristic may need some
4498 tuning in the future. */
4499 if ((!sp_valid && frame.nregs <= 1)
4500 || (TARGET_EPILOGUE_USING_MOVE
4501 && cfun->machine->use_fast_prologue_epilogue
4502 && (frame.nregs > 1 || frame.to_allocate))
4503 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
4504 || (frame_pointer_needed && TARGET_USE_LEAVE
4505 && cfun->machine->use_fast_prologue_epilogue
4506 && frame.nregs == 1)
4507 || current_function_calls_eh_return)
4509 /* Restore registers. We can use ebp or esp to address the memory
4510 locations. If both are available, default to ebp, since offsets
4511 are known to be small. The only exception is esp pointing directly to the
4512 end of the block of saved registers, where we may simplify the addressing mode. */
4515 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
4516 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
4517 frame.to_allocate, style == 2);
4519 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
4520 offset, style == 2);
4522 /* eh_return epilogues need %ecx added to the stack pointer. */
4525 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
4527 if (frame_pointer_needed)
4529 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
4530 tmp = plus_constant (tmp, UNITS_PER_WORD);
4531 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
4533 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
4534 emit_move_insn (hard_frame_pointer_rtx, tmp);
4536 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
4541 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
4542 tmp = plus_constant (tmp, (frame.to_allocate
4543 + frame.nregs * UNITS_PER_WORD));
4544 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
4547 else if (!frame_pointer_needed)
4548 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4549 GEN_INT (frame.to_allocate
4550 + frame.nregs * UNITS_PER_WORD),
4552 /* If not an i386, mov & pop is faster than "leave". */
4553 else if (TARGET_USE_LEAVE || optimize_size
4554 || !cfun->machine->use_fast_prologue_epilogue)
4555 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4558 pro_epilogue_adjust_stack (stack_pointer_rtx,
4559 hard_frame_pointer_rtx,
4562 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4564 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4569 /* First step is to deallocate the stack frame so that we can
4570 pop the registers. */
4573 if (!frame_pointer_needed)
4575 pro_epilogue_adjust_stack (stack_pointer_rtx,
4576 hard_frame_pointer_rtx,
4577 GEN_INT (offset), style);
4579 else if (frame.to_allocate)
4580 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4581 GEN_INT (frame.to_allocate), style);
4583 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4584 if (ix86_save_reg (regno, false))
4587 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
4589 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
4591 if (frame_pointer_needed)
4593 /* Leave results in shorter dependency chains on CPUs that are
4594 able to grok it fast. */
4595 if (TARGET_USE_LEAVE)
4596 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4597 else if (TARGET_64BIT)
4598 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4600 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4604 /* Sibcall epilogues don't want a return instruction. */
4608 if (current_function_pops_args && current_function_args_size)
4610 rtx popc = GEN_INT (current_function_pops_args);
4612 /* i386 can only pop 64K bytes. If asked to pop more, pop the
4613 return address, do an explicit add, and jump indirectly to the caller. */
4616 if (current_function_pops_args >= 65536)
4618 rtx ecx = gen_rtx_REG (SImode, 2);
4620 /* There is no "pascal" calling convention in any 64-bit ABI. */
4624 emit_insn (gen_popsi1 (ecx));
4625 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
4626 emit_jump_insn (gen_return_indirect_internal (ecx));
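/* A sketch of the sequence emitted by the branch above:

	popl	%ecx		# return address into %ecx
	addl	$N, %esp	# pop N bytes of arguments
	jmp	*%ecx		# return to the caller  */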
4629 emit_jump_insn (gen_return_pop_internal (popc));
4632 emit_jump_insn (gen_return_internal ());
4635 /* Reset from the function's potential modifications. */
4638 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4639 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4641 if (pic_offset_table_rtx)
4642 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
4645 /* Extract the parts of an RTL expression that is a valid memory address
4646 for an instruction. Return 0 if the structure of the address is
4647 grossly off. Return -1 if the address contains ASHIFT, so it is not
4648 strictly valid, but still used for computing the length of an lea instruction. */
4651 ix86_decompose_address (rtx addr, struct ix86_address *out)
4653 rtx base = NULL_RTX;
4654 rtx index = NULL_RTX;
4655 rtx disp = NULL_RTX;
4656 HOST_WIDE_INT scale = 1;
4657 rtx scale_rtx = NULL_RTX;
4659 enum ix86_address_seg seg = SEG_DEFAULT;
4661 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
4663 else if (GET_CODE (addr) == PLUS)
4673 addends[n++] = XEXP (op, 1);
4676 while (GET_CODE (op) == PLUS);
4681 for (i = n; i >= 0; --i)
4684 switch (GET_CODE (op))
4689 index = XEXP (op, 0);
4690 scale_rtx = XEXP (op, 1);
4694 if (XINT (op, 1) == UNSPEC_TP
4695 && TARGET_TLS_DIRECT_SEG_REFS
4696 && seg == SEG_DEFAULT)
4697 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
4726 else if (GET_CODE (addr) == MULT)
4728 index = XEXP (addr, 0); /* index*scale */
4729 scale_rtx = XEXP (addr, 1);
4731 else if (GET_CODE (addr) == ASHIFT)
4735 /* We're called for lea too, which implements ashift on occasion. */
4736 index = XEXP (addr, 0);
4737 tmp = XEXP (addr, 1);
4738 if (GET_CODE (tmp) != CONST_INT)
4740 scale = INTVAL (tmp);
4741 if ((unsigned HOST_WIDE_INT) scale > 3)
4747 disp = addr; /* displacement */
4749 /* Extract the integral value of scale. */
4752 if (GET_CODE (scale_rtx) != CONST_INT)
4754 scale = INTVAL (scale_rtx);
4757 /* Allow the arg pointer and stack pointer as an index if there is no scaling. */
4758 if (base && index && scale == 1
4759 && (index == arg_pointer_rtx
4760 || index == frame_pointer_rtx
4761 || (REG_P (index) && REGNO (index) == STACK_POINTER_REGNUM)))
4768 /* Special case: %ebp cannot be encoded as a base without a displacement. */
4769 if ((base == hard_frame_pointer_rtx
4770 || base == frame_pointer_rtx
4771 || base == arg_pointer_rtx) && !disp)
4774 /* Special case: on K6, [%esi] makes the instruction vector decoded.
4775 Avoid this by transforming to [%esi+0]. */
4776 if (ix86_tune == PROCESSOR_K6 && !optimize_size
4777 && base && !index && !disp
4779 && REGNO_REG_CLASS (REGNO (base)) == SIREG)
4782 /* Special case: encode reg+reg instead of reg*2. */
4783 if (!base && index && scale && scale == 2)
4784 base = index, scale = 1;
4786 /* Special case: scaling cannot be encoded without base or displacement. */
4787 if (!base && !disp && index && scale != 1)
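/* For example, the operand 16(%ebx,%ecx,4), i.e. the RTL

	(plus (plus (mult (reg %ecx) (const_int 4)) (reg %ebx))
	      (const_int 16))

   decomposes into base = %ebx, index = %ecx, scale = 4, disp = 16.  */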
4799 /* Return the cost of the memory address x.
4800 For i386, it is better to use a complex address than let gcc copy
4801 the address into a reg and make a new pseudo. But not if the address
4802 requires two regs - that would mean more pseudos with longer lifetimes. */
4805 ix86_address_cost (rtx x)
4807 struct ix86_address parts;
4810 if (!ix86_decompose_address (x, &parts))
4813 /* More complex memory references are better. */
4814 if (parts.disp && parts.disp != const0_rtx)
4816 if (parts.seg != SEG_DEFAULT)
4819 /* Attempt to minimize number of registers in the address. */
4821 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
4823 && (!REG_P (parts.index)
4824 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
4828 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
4830 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
4831 && parts.base != parts.index)
4834 /* The AMD K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
4835 since its predecode logic can't detect the length of instructions
4836 and it degenerates to vector decoded. Increase the cost of such
4837 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
4838 to split such addresses or even refuse such addresses at all.
4840 The following addressing modes are affected:
	[base+scale*index]
	[scale*index+disp]
	[base+index]
4845 The first and last case may be avoidable by explicitly coding the zero in
4846 the memory address, but I don't have an AMD-K6 machine handy to check this
theory. */
if (ix86_tune == PROCESSOR_K6
4850 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
4851 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
4852 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
4858 /* If X is a machine specific address (i.e. a symbol or label being
4859 referenced as a displacement from the GOT implemented using an
4860 UNSPEC), then return the base term. Otherwise return X. */
4863 ix86_find_base_term (rtx x)
4869 if (GET_CODE (x) != CONST)
4872 if (GET_CODE (term) == PLUS
4873 && (GET_CODE (XEXP (term, 1)) == CONST_INT
4874 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
4875 term = XEXP (term, 0);
4876 if (GET_CODE (term) != UNSPEC
4877 || XINT (term, 1) != UNSPEC_GOTPCREL)
4880 term = XVECEXP (term, 0, 0);
4882 if (GET_CODE (term) != SYMBOL_REF
4883 && GET_CODE (term) != LABEL_REF)
4889 term = ix86_delegitimize_address (x);
4891 if (GET_CODE (term) != SYMBOL_REF
4892 && GET_CODE (term) != LABEL_REF)
4898 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
4899 this is used to form addresses to local data when -fPIC is in effect. */
4903 darwin_local_data_pic (rtx disp)
4905 if (GET_CODE (disp) == MINUS)
4907 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
4908 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
4909 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
4911 const char *sym_name = XSTR (XEXP (disp, 1), 0);
4912 if (! strcmp (sym_name, "<pic base>"))
4920 /* Determine if a given RTX is a valid constant. We already know this
4921 satisfies CONSTANT_P. */
4924 legitimate_constant_p (rtx x)
4926 switch (GET_CODE (x))
4931 if (GET_CODE (x) == PLUS)
4933 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4938 if (TARGET_MACHO && darwin_local_data_pic (x))
4941 /* Only some unspecs are valid as "constants". */
4942 if (GET_CODE (x) == UNSPEC)
4943 switch (XINT (x, 1))
4947 return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4949 return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4954 /* We must have drilled down to a symbol. */
4955 if (!symbolic_operand (x, Pmode))
4960 /* TLS symbols are never valid. */
4961 if (tls_symbolic_operand (x, Pmode))
4969 /* We handle everything else in the move patterns. */
4973 /* Determine if it's legal to put X into the constant pool. This
4974 is not possible for the address of thread-local symbols, which
4975 is checked above. */
4978 ix86_cannot_force_const_mem (rtx x)
4980 return !legitimate_constant_p (x);
4983 /* Determine if a given RTX is a valid constant address. */
4986 constant_address_p (rtx x)
4988 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
4991 /* Nonzero if the constant value X is a legitimate general operand
4992 when generating PIC code. It is given that flag_pic is on and
4993 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
4996 legitimate_pic_operand_p (rtx x)
5000 switch (GET_CODE (x))
5003 inner = XEXP (x, 0);
5005 /* Only some unspecs are valid as "constants". */
5006 if (GET_CODE (inner) == UNSPEC)
5007 switch (XINT (inner, 1))
5010 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
5018 return legitimate_pic_address_disp_p (x);
5025 /* Determine if a given CONST RTX is a valid memory displacement in PIC mode. */
5029 legitimate_pic_address_disp_p (rtx disp)
5033 /* In 64bit mode we can allow direct addresses of symbols and labels
5034 when they are not dynamic symbols. */
5037 /* TLS references should always be enclosed in UNSPEC. */
5038 if (tls_symbolic_operand (disp, GET_MODE (disp)))
5040 if (GET_CODE (disp) == SYMBOL_REF
5041 && ix86_cmodel == CM_SMALL_PIC
5042 && SYMBOL_REF_LOCAL_P (disp))
5044 if (GET_CODE (disp) == LABEL_REF)
5046 if (GET_CODE (disp) == CONST
5047 && GET_CODE (XEXP (disp, 0)) == PLUS)
5049 rtx op0 = XEXP (XEXP (disp, 0), 0);
5050 rtx op1 = XEXP (XEXP (disp, 0), 1);
5052 /* TLS references should always be enclosed in UNSPEC. */
5053 if (tls_symbolic_operand (op0, GET_MODE (op0)))
5055 if (((GET_CODE (op0) == SYMBOL_REF
5056 && ix86_cmodel == CM_SMALL_PIC
5057 && SYMBOL_REF_LOCAL_P (op0))
5058 || GET_CODE (op0) == LABEL_REF)
5059 && GET_CODE (op1) == CONST_INT
5060 && INTVAL (op1) < 16*1024*1024
5061 && INTVAL (op1) >= -16*1024*1024)
5065 if (GET_CODE (disp) != CONST)
5067 disp = XEXP (disp, 0);
5071 /* It is unsafe to allow PLUS expressions; this limits the allowed distance
5072 of GOT table references. We should not need these anyway. */
5073 if (GET_CODE (disp) != UNSPEC
5074 || XINT (disp, 1) != UNSPEC_GOTPCREL)
5077 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5078 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5084 if (GET_CODE (disp) == PLUS)
5086 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5088 disp = XEXP (disp, 0);
5092 if (TARGET_MACHO && darwin_local_data_pic (disp))
5095 if (GET_CODE (disp) != UNSPEC)
5098 switch (XINT (disp, 1))
5103 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5105 if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5106 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5107 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5109 case UNSPEC_GOTTPOFF:
5110 case UNSPEC_GOTNTPOFF:
5111 case UNSPEC_INDNTPOFF:
5114 return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5116 return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5118 return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5124 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5125 memory address for an instruction. The MODE argument is the machine mode
5126 for the MEM expression that wants to use this address.
5128 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5129 convert common non-canonical forms to canonical form so that they will be recognized. */
5133 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5135 struct ix86_address parts;
5136 rtx base, index, disp;
5137 HOST_WIDE_INT scale;
5138 const char *reason = NULL;
5139 rtx reason_rtx = NULL_RTX;
5141 if (TARGET_DEBUG_ADDR)
5144 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5145 GET_MODE_NAME (mode), strict);
5149 if (ix86_decompose_address (addr, &parts) <= 0)
5151 reason = "decomposition failed";
5156 index = parts.index;
5158 scale = parts.scale;
5160 /* Validate base register.
5162 Don't allow SUBREGs here; it can lead to spill failures when the base
5163 is one word out of a two word structure, which is represented internally as a DImode int. */
5170 if (GET_CODE (base) != REG)
5172 reason = "base is not a register";
5176 if (GET_MODE (base) != Pmode)
5178 reason = "base is not in Pmode";
5182 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base))
5183 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base)))
5185 reason = "base is not valid";
5190 /* Validate index register.
5192 Don't allow SUBREGs here; it can lead to spill failures when the index
5193 is one word out of a two word structure, which is represented internally as a DImode int. */
5200 if (GET_CODE (index) != REG)
5202 reason = "index is not a register";
5206 if (GET_MODE (index) != Pmode)
5208 reason = "index is not in Pmode";
5212 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (index))
5213 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (index)))
5215 reason = "index is not valid";
5220 /* Validate scale factor. */
5223 reason_rtx = GEN_INT (scale);
5226 reason = "scale without index";
5230 if (scale != 2 && scale != 4 && scale != 8)
5232 reason = "scale is not a valid multiplier";
5237 /* Validate displacement. */
5242 if (GET_CODE (disp) == CONST
5243 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5244 switch (XINT (XEXP (disp, 0), 1))
5248 case UNSPEC_GOTPCREL:
5251 goto is_legitimate_pic;
5253 case UNSPEC_GOTTPOFF:
5254 case UNSPEC_GOTNTPOFF:
5255 case UNSPEC_INDNTPOFF:
5261 reason = "invalid address unspec";
5265 else if (flag_pic && (SYMBOLIC_CONST (disp)
5267 && !machopic_operand_p (disp)
5272 if (TARGET_64BIT && (index || base))
5274 /* foo@dtpoff(%rX) is ok. */
5275 if (GET_CODE (disp) != CONST
5276 || GET_CODE (XEXP (disp, 0)) != PLUS
5277 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5278 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5279 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5280 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5282 reason = "non-constant pic memory reference";
5286 else if (! legitimate_pic_address_disp_p (disp))
5288 reason = "displacement is an invalid pic construct";
5292 /* This code used to verify that a symbolic pic displacement
5293 includes the pic_offset_table_rtx register.
5295 While this is a good idea, unfortunately these constructs may
5296 be created by the "adds using lea" optimization for incorrect
code like:

	int a;
	int foo (int i)
	  {
	    return *(&a + i);
	  }

5305 This code is nonsensical, but results in addressing the
5306 GOT table with a pic_offset_table_rtx base. We can't
5307 just refuse it easily, since it gets matched by the
5308 "addsi3" pattern, which later gets split to lea in the
5309 case the output register differs from the input. While this
5310 could be handled by a separate addsi pattern for this case
5311 that never results in lea, it seems to be easier and a
5312 correct fix for the crash to disable this test. */
5314 else if (GET_CODE (disp) != LABEL_REF
5315 && GET_CODE (disp) != CONST_INT
5316 && (GET_CODE (disp) != CONST
5317 || !legitimate_constant_p (disp))
5318 && (GET_CODE (disp) != SYMBOL_REF
5319 || !legitimate_constant_p (disp)))
5321 reason = "displacement is not constant";
5324 else if (TARGET_64BIT
5325 && !x86_64_immediate_operand (disp, VOIDmode))
5327 reason = "displacement is out of range";
5332 /* Everything looks valid. */
5333 if (TARGET_DEBUG_ADDR)
5334 fprintf (stderr, "Success.\n");
5338 if (TARGET_DEBUG_ADDR)
5340 fprintf (stderr, "Error: %s\n", reason);
5341 debug_rtx (reason_rtx);
5346 /* Return a unique alias set for the GOT. */
5348 static HOST_WIDE_INT
5349 ix86_GOT_alias_set (void)
5351 static HOST_WIDE_INT set = -1;
5353 set = new_alias_set ();
5357 /* Return a legitimate reference for ORIG (an address) using the
5358 register REG. If REG is 0, a new pseudo is generated.
5360 There are two types of references that must be handled:
5362 1. Global data references must load the address from the GOT, via
5363 the PIC reg. An insn is emitted to do this load, and the reg is then used as the address.
5366 2. Static data references, constant pool addresses, and code labels
5367 compute the address as an offset from the GOT, whose base is in
5368 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
5369 differentiate them from global data objects. The returned
5370 address is the PIC reg + an unspec constant.
5372 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
5373 reg also appears in the address. */
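/* As a sketch of the 32-bit forms produced below: a global symbol FOO
   becomes the memory reference

	(mem (plus pic_offset_table_rtx
		   (const (unspec [FOO] UNSPEC_GOT))))

   i.e. "movl foo@GOT(%ebx), %reg", while a local symbol becomes

	(plus pic_offset_table_rtx (const (unspec [FOO] UNSPEC_GOTOFF)))

   i.e. "leal foo@GOTOFF(%ebx), %reg".  */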
5376 legitimize_pic_address (rtx orig, rtx reg)
5384 reg = gen_reg_rtx (Pmode);
5385 /* Use the generic Mach-O PIC machinery. */
5386 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
5389 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
5391 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
5393 /* This symbol may be referenced via a displacement from the PIC
5394 base address (@GOTOFF). */
5396 if (reload_in_progress)
5397 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5398 if (GET_CODE (addr) == CONST)
5399 addr = XEXP (addr, 0);
5400 if (GET_CODE (addr) == PLUS)
5402 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5403 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5406 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5407 new = gen_rtx_CONST (Pmode, new);
5408 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5412 emit_move_insn (reg, new);
5416 else if (GET_CODE (addr) == SYMBOL_REF)
5420 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
5421 new = gen_rtx_CONST (Pmode, new);
5422 new = gen_const_mem (Pmode, new);
5423 set_mem_alias_set (new, ix86_GOT_alias_set ());
5426 reg = gen_reg_rtx (Pmode);
5427 /* Use gen_movsi directly, otherwise the address is loaded
5428 into a register for CSE. We don't want to CSE these addresses;
5429 instead we CSE addresses from the GOT table, so skip this. */
5430 emit_insn (gen_movsi (reg, new));
5435 /* This symbol must be referenced via a load from the
5436 Global Offset Table (@GOT). */
5438 if (reload_in_progress)
5439 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5440 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
5441 new = gen_rtx_CONST (Pmode, new);
5442 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5443 new = gen_const_mem (Pmode, new);
5444 set_mem_alias_set (new, ix86_GOT_alias_set ());
5447 reg = gen_reg_rtx (Pmode);
5448 emit_move_insn (reg, new);
5454 if (GET_CODE (addr) == CONST)
5456 addr = XEXP (addr, 0);
5458 /* We must match stuff we generate before. Assume the only
5459 unspecs that can get here are ours. Not that we could do
5460 anything with them anyway.... */
5461 if (GET_CODE (addr) == UNSPEC
5462 || (GET_CODE (addr) == PLUS
5463 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
5465 if (GET_CODE (addr) != PLUS)
5468 if (GET_CODE (addr) == PLUS)
5470 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
5472 /* Check first to see if this is a constant offset from a @GOTOFF
5473 symbol reference. */
5474 if (local_symbolic_operand (op0, Pmode)
5475 && GET_CODE (op1) == CONST_INT)
5479 if (reload_in_progress)
5480 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5481 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
5483 new = gen_rtx_PLUS (Pmode, new, op1);
5484 new = gen_rtx_CONST (Pmode, new);
5485 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5489 emit_move_insn (reg, new);
5495 if (INTVAL (op1) < -16*1024*1024
5496 || INTVAL (op1) >= 16*1024*1024)
5497 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
5502 base = legitimize_pic_address (XEXP (addr, 0), reg);
5503 new = legitimize_pic_address (XEXP (addr, 1),
5504 base == reg ? NULL_RTX : reg);
5506 if (GET_CODE (new) == CONST_INT)
5507 new = plus_constant (base, INTVAL (new));
5510 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
5512 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
5513 new = XEXP (new, 1);
5515 new = gen_rtx_PLUS (Pmode, base, new);
5523 /* Load the thread pointer. If TO_REG is true, force it into a register. */
5526 get_thread_pointer (int to_reg)
5530 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
5534 reg = gen_reg_rtx (Pmode);
5535 insn = gen_rtx_SET (VOIDmode, reg, tp);
5536 insn = emit_insn (insn);
5541 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
5542 false if we expect this to be used for a memory address and true if
5543 we expect to load the address into a register. */
5546 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
5548 rtx dest, base, off, pic;
5553 case TLS_MODEL_GLOBAL_DYNAMIC:
5554 dest = gen_reg_rtx (Pmode);
5557 rtx rax = gen_rtx_REG (Pmode, 0), insns;
5560 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
5561 insns = get_insns ();
5564 emit_libcall_block (insns, dest, rax, x);
5567 emit_insn (gen_tls_global_dynamic_32 (dest, x));
5570 case TLS_MODEL_LOCAL_DYNAMIC:
5571 base = gen_reg_rtx (Pmode);
5574 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
5577 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
5578 insns = get_insns ();
5581 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
5582 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
5583 emit_libcall_block (insns, base, rax, note);
5586 emit_insn (gen_tls_local_dynamic_base_32 (base));
5588 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
5589 off = gen_rtx_CONST (Pmode, off);
5591 return gen_rtx_PLUS (Pmode, base, off);
5593 case TLS_MODEL_INITIAL_EXEC:
5597 type = UNSPEC_GOTNTPOFF;
5601 if (reload_in_progress)
5602 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5603 pic = pic_offset_table_rtx;
5604 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
5606 else if (!TARGET_GNU_TLS)
5608 pic = gen_reg_rtx (Pmode);
5609 emit_insn (gen_set_got (pic));
5610 type = UNSPEC_GOTTPOFF;
5615 type = UNSPEC_INDNTPOFF;
5618 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
5619 off = gen_rtx_CONST (Pmode, off);
5621 off = gen_rtx_PLUS (Pmode, pic, off);
5622 off = gen_const_mem (Pmode, off);
5623 set_mem_alias_set (off, ix86_GOT_alias_set ());
5625 if (TARGET_64BIT || TARGET_GNU_TLS)
5627 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5628 off = force_reg (Pmode, off);
5629 return gen_rtx_PLUS (Pmode, base, off);
5633 base = get_thread_pointer (true);
5634 dest = gen_reg_rtx (Pmode);
5635 emit_insn (gen_subsi3 (dest, base, off));
5639 case TLS_MODEL_LOCAL_EXEC:
5640 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
5641 (TARGET_64BIT || TARGET_GNU_TLS)
5642 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
5643 off = gen_rtx_CONST (Pmode, off);
5645 if (TARGET_64BIT || TARGET_GNU_TLS)
5647 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5648 return gen_rtx_PLUS (Pmode, base, off);
5652 base = get_thread_pointer (true);
5653 dest = gen_reg_rtx (Pmode);
5654 emit_insn (gen_subsi3 (dest, base, off));
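/* A sketch of the 32-bit GNU TLS sequences the cases above expand to,
   for a symbol x (assuming TARGET_GNU_TLS; the exact relocations depend
   on the model and target):

	global dynamic:	leal	x@tlsgd(,%ebx,1), %eax
			call	___tls_get_addr
	local dynamic:	leal	x@tlsldm(%ebx), %eax
			call	___tls_get_addr
			leal	x@dtpoff(%eax), %reg
	initial exec:	movl	%gs:0, %reg
			addl	x@gotntpoff(%ebx), %reg
	local exec:	movl	%gs:0, %reg
			addl	$x@ntpoff, %reg  */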
5665 /* Try machine-dependent ways of modifying an illegitimate address
5666 to be legitimate. If we find one, return the new, valid address.
5667 This macro is used in only one place: `memory_address' in explow.c.
5669 OLDX is the address as it was before break_out_memory_refs was called.
5670 In some cases it is useful to look at this to decide what needs to be done.
5672 MODE and WIN are passed so that this macro can use
5673 GO_IF_LEGITIMATE_ADDRESS.
5675 It is always safe for this macro to do nothing. It exists to recognize
5676 opportunities to optimize the output.
5678 For the 80386, we handle X+REG by loading X into a register R and
5679 using R+REG. R will go in a general reg and indexing will be used.
5680 However, if REG is a broken-out memory address or multiplication,
5681 nothing needs to be done because REG can certainly go in a general reg.
5683 When -fpic is used, special handling is needed for symbolic references.
5684 See comments by legitimize_pic_address in i386.c for details. */
5687 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
5692 if (TARGET_DEBUG_ADDR)
5694 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
5695 GET_MODE_NAME (mode));
5699 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
5701 return legitimize_tls_address (x, log, false);
5702 if (GET_CODE (x) == CONST
5703 && GET_CODE (XEXP (x, 0)) == PLUS
5704 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5705 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
5707 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
5708 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
5711 if (flag_pic && SYMBOLIC_CONST (x))
5712 return legitimize_pic_address (x, 0);
5714 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
5715 if (GET_CODE (x) == ASHIFT
5716 && GET_CODE (XEXP (x, 1)) == CONST_INT
5717 && (log = (unsigned) exact_log2 (INTVAL (XEXP (x, 1)))) < 4)
5720 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
5721 GEN_INT (1 << log));
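/* E.g. (ashift (reg) (const_int 3)) becomes (mult (reg) (const_int 8)),
   matching the scaled-index form that ix86_decompose_address and the
   lea patterns expect.  */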
5724 if (GET_CODE (x) == PLUS)
5726 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
5728 if (GET_CODE (XEXP (x, 0)) == ASHIFT
5729 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
5730 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4)
5733 XEXP (x, 0) = gen_rtx_MULT (Pmode,
5734 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
5735 GEN_INT (1 << log));
5738 if (GET_CODE (XEXP (x, 1)) == ASHIFT
5739 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5740 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1)))) < 4)
5743 XEXP (x, 1) = gen_rtx_MULT (Pmode,
5744 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
5745 GEN_INT (1 << log));
5748 /* Put multiply first if it isn't already. */
5749 if (GET_CODE (XEXP (x, 1)) == MULT)
5751 rtx tmp = XEXP (x, 0);
5752 XEXP (x, 0) = XEXP (x, 1);
5757 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
5758 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
5759 created by virtual register instantiation, register elimination, and
5760 similar optimizations. */
5761 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
5764 x = gen_rtx_PLUS (Pmode,
5765 gen_rtx_PLUS (Pmode, XEXP (x, 0),
5766 XEXP (XEXP (x, 1), 0)),
5767 XEXP (XEXP (x, 1), 1));
5771 /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
5772 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
5773 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
5774 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5775 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
5776 && CONSTANT_P (XEXP (x, 1)))
5779 rtx other = NULL_RTX;
5781 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5783 constant = XEXP (x, 1);
5784 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
5786 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
5788 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
5789 other = XEXP (x, 1);
5797 x = gen_rtx_PLUS (Pmode,
5798 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
5799 XEXP (XEXP (XEXP (x, 0), 1), 0)),
5800 plus_constant (other, INTVAL (constant)));
5804 if (changed && legitimate_address_p (mode, x, FALSE))
5807 if (GET_CODE (XEXP (x, 0)) == MULT)
5810 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
5813 if (GET_CODE (XEXP (x, 1)) == MULT)
5816 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
5820 && GET_CODE (XEXP (x, 1)) == REG
5821 && GET_CODE (XEXP (x, 0)) == REG)
5824 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
5827 x = legitimize_pic_address (x, 0);
5830 if (changed && legitimate_address_p (mode, x, FALSE))
5833 if (GET_CODE (XEXP (x, 0)) == REG)
5835 rtx temp = gen_reg_rtx (Pmode);
5836 rtx val = force_operand (XEXP (x, 1), temp);
5838 emit_move_insn (temp, val);
5844 else if (GET_CODE (XEXP (x, 1)) == REG)
5846 rtx temp = gen_reg_rtx (Pmode);
5847 rtx val = force_operand (XEXP (x, 0), temp);
5849 emit_move_insn (temp, val);
5859 /* Print an integer constant expression in assembler syntax. Addition
5860 and subtraction are the only arithmetic that may appear in these
5861 expressions. FILE is the stdio stream to write to, X is the rtx, and
5862 CODE is the operand print code from the output string. */
5865 output_pic_addr_const (FILE *file, rtx x, int code)
5869 switch (GET_CODE (x))
5879 /* Mark the decl as referenced so that cgraph will output the function. */
5880 if (SYMBOL_REF_DECL (x))
5881 mark_decl_referenced (SYMBOL_REF_DECL (x));
5883 assemble_name (file, XSTR (x, 0));
5884 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
5885 fputs ("@PLT", file);
5892 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
5893 assemble_name (asm_out_file, buf);
5897 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5901 /* This used to output parentheses around the expression,
5902 but that does not work on the 386 (either ATT or BSD assembler). */
5903 output_pic_addr_const (file, XEXP (x, 0), code);
5907 if (GET_MODE (x) == VOIDmode)
5909 /* We can use %d if the number is <32 bits and positive. */
5910 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
5911 fprintf (file, "0x%lx%08lx",
5912 (unsigned long) CONST_DOUBLE_HIGH (x),
5913 (unsigned long) CONST_DOUBLE_LOW (x));
5915 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
5918 /* We can't handle floating point constants;
5919 PRINT_OPERAND must handle them. */
5920 output_operand_lossage ("floating constant misused");
5924 /* Some assemblers need integer constants to appear first. */
5925 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5927 output_pic_addr_const (file, XEXP (x, 0), code);
5929 output_pic_addr_const (file, XEXP (x, 1), code);
5931 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5933 output_pic_addr_const (file, XEXP (x, 1), code);
5935 output_pic_addr_const (file, XEXP (x, 0), code);
5943 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
5944 output_pic_addr_const (file, XEXP (x, 0), code);
5946 output_pic_addr_const (file, XEXP (x, 1), code);
5948 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
5952 if (XVECLEN (x, 0) != 1)
5954 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
5955 switch (XINT (x, 1))
5958 fputs ("@GOT", file);
5961 fputs ("@GOTOFF", file);
5963 case UNSPEC_GOTPCREL:
5964 fputs ("@GOTPCREL(%rip)", file);
5966 case UNSPEC_GOTTPOFF:
5967 /* FIXME: This might be @TPOFF in Sun ld too. */
5968 fputs ("@GOTTPOFF", file);
5971 fputs ("@TPOFF", file);
5975 fputs ("@TPOFF", file);
5977 fputs ("@NTPOFF", file);
5980 fputs ("@DTPOFF", file);
5982 case UNSPEC_GOTNTPOFF:
5984 fputs ("@GOTTPOFF(%rip)", file);
5986 fputs ("@GOTNTPOFF", file);
5988 case UNSPEC_INDNTPOFF:
5989 fputs ("@INDNTPOFF", file);
5992 output_operand_lossage ("invalid UNSPEC as operand");
5998 output_operand_lossage ("invalid expression as operand");
6002 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
6003 We need to emit DTP-relative relocations. */
6006 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6008 fputs (ASM_LONG, file);
6009 output_addr_const (file, x);
6010 fputs ("@DTPOFF", file);
6016 fputs (", 0", file);
6023 /* In the name of slightly smaller debug output, and to cater to
6024 general assembler lossage, recognize PIC+GOTOFF and turn it back
6025 into a direct symbol reference. */
6028 ix86_delegitimize_address (rtx orig_x)
6032 if (GET_CODE (x) == MEM)
6037 if (GET_CODE (x) != CONST
6038 || GET_CODE (XEXP (x, 0)) != UNSPEC
6039 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6040 || GET_CODE (orig_x) != MEM)
6042 return XVECEXP (XEXP (x, 0), 0, 0);
6045 if (GET_CODE (x) != PLUS
6046 || GET_CODE (XEXP (x, 1)) != CONST)
6049 if (GET_CODE (XEXP (x, 0)) == REG
6050 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6051 /* %ebx + GOT/GOTOFF */
6053 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6055 /* %ebx + %reg * scale + GOT/GOTOFF */
6057 if (GET_CODE (XEXP (y, 0)) == REG
6058 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6060 else if (GET_CODE (XEXP (y, 1)) == REG
6061 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6065 if (GET_CODE (y) != REG
6066 && GET_CODE (y) != MULT
6067 && GET_CODE (y) != ASHIFT)
6073 x = XEXP (XEXP (x, 1), 0);
6074 if (GET_CODE (x) == UNSPEC
6075 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6076 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6079 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6080 return XVECEXP (x, 0, 0);
6083 if (GET_CODE (x) == PLUS
6084 && GET_CODE (XEXP (x, 0)) == UNSPEC
6085 && GET_CODE (XEXP (x, 1)) == CONST_INT
6086 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6087 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6088 && GET_CODE (orig_x) != MEM)))
6090 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6092 return gen_rtx_PLUS (Pmode, y, x);
6100 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6105 if (mode == CCFPmode || mode == CCFPUmode)
6107 enum rtx_code second_code, bypass_code;
6108 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6109 if (bypass_code != UNKNOWN || second_code != UNKNOWN)
6111 code = ix86_fp_compare_code_to_integer (code);
6115 code = reverse_condition (code);
6126 if (mode != CCmode && mode != CCNOmode && mode != CCGCmode)
6131 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6132 Those same assemblers have the same but opposite lossage on cmov. */
6135 suffix = fp ? "nbe" : "a";
6138 if (mode == CCNOmode || mode == CCGOCmode)
6140 else if (mode == CCmode || mode == CCGCmode)
6151 if (mode == CCNOmode || mode == CCGOCmode)
6153 else if (mode == CCmode || mode == CCGCmode)
6162 suffix = fp ? "nb" : "ae";
6165 if (mode != CCmode && mode != CCGCmode && mode != CCNOmode)
6175 suffix = fp ? "u" : "p";
6178 suffix = fp ? "nu" : "np";
6183 fputs (suffix, file);
6186 /* Print the name of register X to FILE based on its machine mode and number.
6187 If CODE is 'w', pretend the mode is HImode.
6188 If CODE is 'b', pretend the mode is QImode.
6189 If CODE is 'k', pretend the mode is SImode.
6190 If CODE is 'q', pretend the mode is DImode.
6191 If CODE is 'h', pretend the reg is the `high' byte register.
6192 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
6195 print_reg (rtx x, int code, FILE *file)
6197 if (REGNO (x) == ARG_POINTER_REGNUM
6198 || REGNO (x) == FRAME_POINTER_REGNUM
6199 || REGNO (x) == FLAGS_REG
6200 || REGNO (x) == FPSR_REG)
6203 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6206 if (code == 'w' || MMX_REG_P (x))
6208 else if (code == 'b')
6210 else if (code == 'k')
6212 else if (code == 'q')
6214 else if (code == 'y')
6216 else if (code == 'h')
6219 code = GET_MODE_SIZE (GET_MODE (x));
6221 /* Irritatingly, AMD extended registers use a different naming convention
6222 from the normal registers. */
6223 if (REX_INT_REG_P (x))
6230 error ("extended registers have no high halves");
6233 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6236 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6239 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6242 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6245 error ("unsupported operand size for extended register");
6253 if (STACK_TOP_P (x))
6255 fputs ("st(0)", file);
6262 if (! ANY_FP_REG_P (x))
6263 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6268 fputs (hi_reg_name[REGNO (x)], file);
6271 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6273 fputs (qi_reg_name[REGNO (x)], file);
6276 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6278 fputs (qi_high_reg_name[REGNO (x)], file);
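/* For example, with operands[0] being (reg:SI ax) in the AT&T dialect,
   %b0 prints "%al", %w0 prints "%ax", %k0 prints "%eax" and %h0 prints
   "%ah"; with no code, the operand's own mode selects the name.  */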
6285 /* Locate some local-dynamic symbol still in use by this function
6286 so that we can print its name in some tls_local_dynamic_base pattern. */
6290 get_some_local_dynamic_name (void)
6294 if (cfun->machine->some_ld_name)
6295 return cfun->machine->some_ld_name;
6297 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
6299 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
6300 return cfun->machine->some_ld_name;
6306 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
6310 if (GET_CODE (x) == SYMBOL_REF
6311 && local_dynamic_symbolic_operand (x, Pmode))
6313 cfun->machine->some_ld_name = XSTR (x, 0);
6321 /* Meaning of CODE: L,W,B,Q,S,T -- print the opcode suffix for the specified size of operand.
6322 C -- print opcode suffix for set/cmov insn.
6323 c -- like C, but print reversed condition
6324 F,f -- likewise, but for floating-point.
6325 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.", otherwise nothing.
6327 R -- print the prefix for register names.
6328 z -- print the opcode suffix for the size of the current operand.
6329 * -- print a star (in certain assembler syntax)
6330 A -- print an absolute memory reference.
6331 w -- print the operand as if it's a "word" (HImode) even if it isn't.
6332 s -- print a shift double count, followed by the assembler's argument delimiter.
6334 b -- print the QImode name of the register for the indicated operand.
6335 %b0 would print %al if operands[0] is reg 0.
6336 w -- likewise, print the HImode name of the register.
6337 k -- likewise, print the SImode name of the register.
6338 q -- likewise, print the DImode name of the register.
6339 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
6340 y -- print "st(0)" instead of "st" as a register.
6341 D -- print condition for SSE cmp instruction.
6342 P -- if PIC, print an @PLT suffix.
6343 X -- don't print any sort of PIC '@' suffix for a symbol.
6344 & -- print some in-use local-dynamic symbol name.
6345 H -- print a memory address offset by 8; used for sse high-parts. */
6349 print_operand (FILE *file, rtx x, int code)
6356 if (ASSEMBLER_DIALECT == ASM_ATT)
6361 assemble_name (file, get_some_local_dynamic_name ());
6365 if (ASSEMBLER_DIALECT == ASM_ATT)
6367 else if (ASSEMBLER_DIALECT == ASM_INTEL)
6369 /* Intel syntax. For absolute addresses, registers should not
6370 be surrounded by braces. */
6371 if (GET_CODE (x) != REG)
6374 PRINT_OPERAND (file, x, 0);
6382 PRINT_OPERAND (file, x, 0);
6387 if (ASSEMBLER_DIALECT == ASM_ATT)
6392 if (ASSEMBLER_DIALECT == ASM_ATT)
6397 if (ASSEMBLER_DIALECT == ASM_ATT)
6402 if (ASSEMBLER_DIALECT == ASM_ATT)
6407 if (ASSEMBLER_DIALECT == ASM_ATT)
6412 if (ASSEMBLER_DIALECT == ASM_ATT)
6417 /* 387 opcodes don't get size suffixes if the operands are registers. */
6419 if (STACK_REG_P (x))
6422 /* Likewise if using Intel opcodes. */
6423 if (ASSEMBLER_DIALECT == ASM_INTEL)
6426 /* Derive the size of the op suffix from the size of the operand. */
6427 switch (GET_MODE_SIZE (GET_MODE (x)))
6430 #ifdef HAVE_GAS_FILDS_FISTS
6436 if (GET_MODE (x) == SFmode)
6451 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
6453 #ifdef GAS_MNEMONICS
6479 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
6481 PRINT_OPERAND (file, x, 0);
6487 /* A little bit of braindamage here. The SSE compare instructions
6488 use completely different names for the comparisons than the
6489 fp conditional moves do. */
6490 switch (GET_CODE (x))
6505 fputs ("unord", file);
6509 fputs ("neq", file);
6513 fputs ("nlt", file);
6517 fputs ("nle", file);
6520 fputs ("ord", file);
6528 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6529 if (ASSEMBLER_DIALECT == ASM_ATT)
6531 switch (GET_MODE (x))
6533 case HImode: putc ('w', file); break;
6535 case SFmode: putc ('l', file); break;
6537 case DFmode: putc ('q', file); break;
6545 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
6548 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6549 if (ASSEMBLER_DIALECT == ASM_ATT)
6552 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
6555 /* Like above, but reverse condition */
6557 /* Check to see if argument to %c is really a constant
6558 and not a condition code which needs to be reversed. */
6559 if (!COMPARISON_P (x))
6561 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
6564 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
6567 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6568 if (ASSEMBLER_DIALECT == ASM_ATT)
6571 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
6575 /* It doesn't actually matter what mode we use here, as we're
6576 only going to use this for printing. */
6577 x = adjust_address_nv (x, DImode, 8);
6584 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
6587 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
6590 int pred_val = INTVAL (XEXP (x, 0));
6592 if (pred_val < REG_BR_PROB_BASE * 45 / 100
6593 || pred_val > REG_BR_PROB_BASE * 55 / 100)
6595 int taken = pred_val > REG_BR_PROB_BASE / 2;
6596 int cputaken = final_forward_branch_p (current_output_insn) == 0;
6598 /* Emit hints only in the case where the default branch prediction
6599 heuristics would fail. */
6600 if (taken != cputaken)
6602 /* We use 3e (DS) prefix for taken branches and
6603 2e (CS) prefix for not taken branches. */
6605 fputs ("ds ; ", file);
6607 fputs ("cs ; ", file);
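/* Illustrative example (added): a branch predicted taken where the
   static forward-branch-not-taken heuristic says otherwise comes out
   as "ds ; jle .L5" -- the 0x3e prefix byte is the taken hint and the
   0x2e prefix the not-taken hint, on CPUs that honor these hints. */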
6614 output_operand_lossage ("invalid operand code '%c'", code);
6618 if (GET_CODE (x) == REG)
6619 print_reg (x, code, file);
6621 else if (GET_CODE (x) == MEM)
6623 /* No `byte ptr' prefix for call instructions. */
6624 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
6627 switch (GET_MODE_SIZE (GET_MODE (x)))
6629 case 1: size = "BYTE"; break;
6630 case 2: size = "WORD"; break;
6631 case 4: size = "DWORD"; break;
6632 case 8: size = "QWORD"; break;
6633 case 12: size = "XWORD"; break;
6634 case 16: size = "XMMWORD"; break;
6639 /* Check for explicit size override (codes 'b', 'w' and 'k') */
6642 else if (code == 'w')
6644 else if (code == 'k')
6648 fputs (" PTR ", file);
6652 /* Avoid (%rip) for call operands. */
6653 if (CONSTANT_ADDRESS_P (x) && code == 'P'
6654 && GET_CODE (x) != CONST_INT)
6655 output_addr_const (file, x);
6656 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
6657 output_operand_lossage ("invalid constraints for operand");
6662 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
6667 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6668 REAL_VALUE_TO_TARGET_SINGLE (r, l);
6670 if (ASSEMBLER_DIALECT == ASM_ATT)
6672 fprintf (file, "0x%08lx", l);
6675 /* These float cases don't actually occur as immediate operands. */
6676 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
6680 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6681 fprintf (file, "%s", dstr);
6684 else if (GET_CODE (x) == CONST_DOUBLE
6685 && GET_MODE (x) == XFmode)
6689 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6690 fprintf (file, "%s", dstr);
6697 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
6699 if (ASSEMBLER_DIALECT == ASM_ATT)
6702 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
6703 || GET_CODE (x) == LABEL_REF)
6705 if (ASSEMBLER_DIALECT == ASM_ATT)
6708 fputs ("OFFSET FLAT:", file);
6711 if (GET_CODE (x) == CONST_INT)
6712 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6714 output_pic_addr_const (file, x, code);
6716 output_addr_const (file, x);
6720 /* Print a memory operand whose address is ADDR. */
6723 print_operand_address (FILE *file, rtx addr)
6725 struct ix86_address parts;
6726 rtx base, index, disp;
6729 if (! ix86_decompose_address (addr, &parts))
6733 index = parts.index;
6735 scale = parts.scale;
6743 if (USER_LABEL_PREFIX[0] == 0)
6745 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
6751 if (!base && !index)
6753 /* A displacement-only address requires special attention. */
6755 if (GET_CODE (disp) == CONST_INT)
6757 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
6759 if (USER_LABEL_PREFIX[0] == 0)
6761 fputs ("ds:", file);
6763 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
6766 output_pic_addr_const (file, disp, 0);
6768 output_addr_const (file, disp);
6770 /* In 64-bit mode, use the one-byte-shorter RIP-relative addressing. */
6772 && ((GET_CODE (disp) == SYMBOL_REF
6773 && ! tls_symbolic_operand (disp, GET_MODE (disp)))
6774 || GET_CODE (disp) == LABEL_REF
6775 || (GET_CODE (disp) == CONST
6776 && GET_CODE (XEXP (disp, 0)) == PLUS
6777 && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
6778 || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
6779 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
6780 fputs ("(%rip)", file);
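/* Encoding note (added; standard x86-64 facts): "foo(%rip)" uses
   ModRM mod=00/rm=101 with a 4-byte displacement and no SIB byte,
   whereas a 32-bit absolute address in 64-bit mode needs an extra SIB
   byte -- which is why RIP-relative addressing is one byte shorter. */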
6784 if (ASSEMBLER_DIALECT == ASM_ATT)
6789 output_pic_addr_const (file, disp, 0);
6790 else if (GET_CODE (disp) == LABEL_REF)
6791 output_asm_label (disp);
6793 output_addr_const (file, disp);
6798 print_reg (base, 0, file);
6802 print_reg (index, 0, file);
6804 fprintf (file, ",%d", scale);
6810 rtx offset = NULL_RTX;
6814 /* Pull out the offset of a symbol; print any symbol itself. */
6815 if (GET_CODE (disp) == CONST
6816 && GET_CODE (XEXP (disp, 0)) == PLUS
6817 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
6819 offset = XEXP (XEXP (disp, 0), 1);
6820 disp = gen_rtx_CONST (VOIDmode,
6821 XEXP (XEXP (disp, 0), 0));
6825 output_pic_addr_const (file, disp, 0);
6826 else if (GET_CODE (disp) == LABEL_REF)
6827 output_asm_label (disp);
6828 else if (GET_CODE (disp) == CONST_INT)
6831 output_addr_const (file, disp);
6837 print_reg (base, 0, file);
6840 if (INTVAL (offset) >= 0)
6842 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6846 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6853 print_reg (index, 0, file);
6855 fprintf (file, "*%d", scale);
6863 output_addr_const_extra (FILE *file, rtx x)
6867 if (GET_CODE (x) != UNSPEC)
6870 op = XVECEXP (x, 0, 0);
6871 switch (XINT (x, 1))
6873 case UNSPEC_GOTTPOFF:
6874 output_addr_const (file, op);
6875 /* FIXME: This might be @TPOFF in Sun ld. */
6876 fputs ("@GOTTPOFF", file);
6879 output_addr_const (file, op);
6880 fputs ("@TPOFF", file);
6883 output_addr_const (file, op);
6885 fputs ("@TPOFF", file);
6887 fputs ("@NTPOFF", file);
6890 output_addr_const (file, op);
6891 fputs ("@DTPOFF", file);
6893 case UNSPEC_GOTNTPOFF:
6894 output_addr_const (file, op);
6896 fputs ("@GOTTPOFF(%rip)", file);
6898 fputs ("@GOTNTPOFF", file);
6900 case UNSPEC_INDNTPOFF:
6901 output_addr_const (file, op);
6902 fputs ("@INDNTPOFF", file);
6912 /* Split one or more DImode RTL references into pairs of SImode
6913 references. The RTL can be REG, offsettable MEM, integer constant, or
6914 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
6915 split and "num" is its length. lo_half and hi_half are output arrays
6916 that parallel "operands". */
6919 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6923 rtx op = operands[num];
6925 /* simplify_subreg refuses to split volatile memory addresses,
6926 but we still have to handle them. */
6927 if (GET_CODE (op) == MEM)
6929 lo_half[num] = adjust_address (op, SImode, 0);
6930 hi_half[num] = adjust_address (op, SImode, 4);
6934 lo_half[num] = simplify_gen_subreg (SImode, op,
6935 GET_MODE (op) == VOIDmode
6936 ? DImode : GET_MODE (op), 0);
6937 hi_half[num] = simplify_gen_subreg (SImode, op,
6938 GET_MODE (op) == VOIDmode
6939 ? DImode : GET_MODE (op), 4);
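/* Illustrative example (added; hypothetical operand): for
   operands[0] = (mem:DI (reg:SI bx)), split_di yields
   lo_half[0] = (mem:SI (reg:SI bx)) and
   hi_half[0] = (mem:SI (plus:SI (reg:SI bx) (const_int 4))). */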
6943 /* Split one or more TImode RTL references into pairs of DImode
6944 references. The RTL can be REG, offsettable MEM, integer constant, or
6945 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
6946 split and "num" is its length. lo_half and hi_half are output arrays
6947 that parallel "operands". */
6950 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6954 rtx op = operands[num];
6956 /* simplify_subreg refuses to split volatile memory addresses, but we
6957 still have to handle them. */
6958 if (GET_CODE (op) == MEM)
6960 lo_half[num] = adjust_address (op, DImode, 0);
6961 hi_half[num] = adjust_address (op, DImode, 8);
6965 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
6966 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
6971 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
6972 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
6973 is the expression of the binary operation. The output may either be
6974 emitted here, or returned to the caller, like all output_* functions.
6976 There is no guarantee that the operands are the same mode, as they
6977 might be within FLOAT or FLOAT_EXTEND expressions. */
6979 #ifndef SYSV386_COMPAT
6980 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
6981 wants to fix the assemblers because that causes incompatibility
6982 with gcc. No-one wants to fix gcc because that causes
6983 incompatibility with assemblers... You can use
6984 -DSYSV386_COMPAT=0 if you recompile both gcc and gas with it. */
6985 #define SYSV386_COMPAT 1
6989 output_387_binary_op (rtx insn, rtx *operands)
6991 static char buf[30];
6994 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
6996 #ifdef ENABLE_CHECKING
6997 /* Even if we do not want to check the inputs, this documents the input
6998 constraints, which helps in understanding the following code. */
6999 if (STACK_REG_P (operands[0])
7000 && ((REG_P (operands[1])
7001 && REGNO (operands[0]) == REGNO (operands[1])
7002 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
7003 || (REG_P (operands[2])
7004 && REGNO (operands[0]) == REGNO (operands[2])
7005 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
7006 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
7012 switch (GET_CODE (operands[3]))
7015 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7016 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7024 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7025 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7033 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7034 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7042 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7043 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7057 if (GET_MODE (operands[0]) == SFmode)
7058 strcat (buf, "ss\t{%2, %0|%0, %2}");
7060 strcat (buf, "sd\t{%2, %0|%0, %2}");
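/* Illustrative example (added): for an SFmode PLUS with SSE operands
   the buffer becomes "addss\t{%2, %0|%0, %2}", which prints as, e.g.,
   "addss %xmm1, %xmm0" in AT&T syntax. */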
7065 switch (GET_CODE (operands[3]))
7069 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7071 rtx temp = operands[2];
7072 operands[2] = operands[1];
7076 /* We now know that operands[0] == operands[1]. */
7078 if (GET_CODE (operands[2]) == MEM)
7084 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7086 if (STACK_TOP_P (operands[0]))
7087 /* How is it that we are storing to a dead operand[2]?
7088 Well, presumably operands[1] is dead too. We can't
7089 store the result to st(0) as st(0) gets popped on this
7090 instruction. Instead store to operands[2] (which I
7091 think has to be st(1)). st(1) will be popped later.
7092 gcc <= 2.8.1 didn't have this check and generated
7093 assembly code that the Unixware assembler rejected. */
7094 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7096 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7100 if (STACK_TOP_P (operands[0]))
7101 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7103 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7108 if (GET_CODE (operands[1]) == MEM)
7114 if (GET_CODE (operands[2]) == MEM)
7120 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7123 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7124 derived assemblers, confusingly reverse the direction of
7125 the operation for fsub{r} and fdiv{r} when the
7126 destination register is not st(0). The Intel assembler
7127 doesn't have this brain damage. Read !SYSV386_COMPAT to
7128 figure out what the hardware really does. */
7129 if (STACK_TOP_P (operands[0]))
7130 p = "{p\t%0, %2|rp\t%2, %0}";
7132 p = "{rp\t%2, %0|p\t%0, %2}";
7134 if (STACK_TOP_P (operands[0]))
7135 /* As above for fmul/fadd, we can't store to st(0). */
7136 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7138 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7143 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7146 if (STACK_TOP_P (operands[0]))
7147 p = "{rp\t%0, %1|p\t%1, %0}";
7149 p = "{p\t%1, %0|rp\t%0, %1}";
7151 if (STACK_TOP_P (operands[0]))
7152 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7154 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7159 if (STACK_TOP_P (operands[0]))
7161 if (STACK_TOP_P (operands[1]))
7162 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7164 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7167 else if (STACK_TOP_P (operands[1]))
7170 p = "{\t%1, %0|r\t%0, %1}";
7172 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7178 p = "{r\t%2, %0|\t%0, %2}";
7180 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7193 /* Output code to initialize control word copies used by trunc?f?i and
7194 rounding patterns. CURRENT_MODE is set to the current control word,
7195 while NEW_MODE is set to the new control word. */
7198 emit_i387_cw_initialization (rtx current_mode, rtx new_mode, int mode)
7200 rtx reg = gen_reg_rtx (HImode);
7202 emit_insn (gen_x86_fnstcw_1 (current_mode));
7203 emit_move_insn (reg, current_mode);
7205 if (!TARGET_PARTIAL_REG_STALL && !optimize_size
7211 /* round down toward -oo */
7212 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
7216 /* round up toward +oo */
7217 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
7221 /* round toward zero (truncate) */
7222 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
7225 case I387_CW_MASK_PM:
7226 /* mask precision exception for nearbyint() */
7227 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7239 /* round down toward -oo */
7240 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7241 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7245 /* round up toward +oo */
7246 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7247 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
7251 /* round toward zero (truncate) */
7252 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7255 case I387_CW_MASK_PM:
7256 /* mask precision exception for nearbyint() */
7257 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
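/* Background note (added; standard x87 facts): bits 10-11 of the FPU
   control word are the rounding control -- 00 round to nearest,
   01 round down, 10 round up, 11 truncate -- hence the ~0x0c00 mask
   and the 0x0400/0x0800/0x0c00 values above. Bit 5 (0x0020) masks
   the precision exception, as needed for nearbyint(). */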
7265 emit_move_insn (new_mode, reg);
7268 /* Output code for INSN to convert a float to a signed int. OPERANDS
7269 are the insn operands. The output may be [HSD]Imode and the input
7270 operand may be [SDX]Fmode. */
7273 output_fix_trunc (rtx insn, rtx *operands)
7275 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7276 int dimode_p = GET_MODE (operands[0]) == DImode;
7278 /* Jump through a hoop or two for DImode, since the hardware has no
7279 non-popping instruction. We used to do this a different way, but
7280 that was somewhat fragile and broke with post-reload splitters. */
7281 if (dimode_p && !stack_top_dies)
7282 output_asm_insn ("fld\t%y1", operands);
7284 if (!STACK_TOP_P (operands[1]))
7287 if (GET_CODE (operands[0]) != MEM)
7290 output_asm_insn ("fldcw\t%3", operands);
7291 if (stack_top_dies || dimode_p)
7292 output_asm_insn ("fistp%z0\t%0", operands);
7294 output_asm_insn ("fist%z0\t%0", operands);
7295 output_asm_insn ("fldcw\t%2", operands);
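/* Illustrative emitted sequence (added; assuming a DImode memory
   destination and GAS mnemonics):
       fldcw   %3        # switch to truncating rounding mode
       fistpll %0        # convert, store and pop
       fldcw   %2        # restore the caller's control word  */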
7300 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
7301 should be used. UNORDERED_P is true when fucom should be used. */
7304 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
7307 rtx cmp_op0, cmp_op1;
7308 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
7312 cmp_op0 = operands[0];
7313 cmp_op1 = operands[1];
7317 cmp_op0 = operands[1];
7318 cmp_op1 = operands[2];
7323 if (GET_MODE (operands[0]) == SFmode)
7325 return "ucomiss\t{%1, %0|%0, %1}";
7327 return "comiss\t{%1, %0|%0, %1}";
7330 return "ucomisd\t{%1, %0|%0, %1}";
7332 return "comisd\t{%1, %0|%0, %1}";
7335 if (! STACK_TOP_P (cmp_op0))
7338 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7340 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
7344 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
7345 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
7348 return "ftst\n\tfnstsw\t%0";
7351 if (STACK_REG_P (cmp_op1)
7353 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
7354 && REGNO (cmp_op1) != FIRST_STACK_REG)
7356 /* If both the top of the 387 stack and the other operand (also a
7357 stack register) die, then this must be an
7358 `fcompp' float compare. */
7362 /* There is no double popping fcomi variant. Fortunately,
7363 eflags is immune to the fstp's cc clobbering. */
7365 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
7367 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
7368 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
7373 return "fucompp\n\tfnstsw\t%0";
7375 return "fcompp\n\tfnstsw\t%0";
7380 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
7382 static const char * const alt[16] =
7384 "fcom%z2\t%y2\n\tfnstsw\t%0",
7385 "fcomp%z2\t%y2\n\tfnstsw\t%0",
7386 "fucom%z2\t%y2\n\tfnstsw\t%0",
7387 "fucomp%z2\t%y2\n\tfnstsw\t%0",
7389 "ficom%z2\t%y2\n\tfnstsw\t%0",
7390 "ficomp%z2\t%y2\n\tfnstsw\t%0",
7394 "fcomi\t{%y1, %0|%0, %y1}",
7395 "fcomip\t{%y1, %0|%0, %y1}",
7396 "fucomi\t{%y1, %0|%0, %y1}",
7397 "fucomip\t{%y1, %0|%0, %y1}",
7408 mask = eflags_p << 3;
7409 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
7410 mask |= unordered_p << 1;
7411 mask |= stack_top_dies;
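/* Worked example (added): eflags_p = 1, an FP (non-integer) cmp_op1,
   unordered_p = 1 and a dying stack top give
   mask = 8 | 0 | 2 | 1 = 11, selecting
   alt[11] = "fucomip\t{%y1, %0|%0, %y1}" above. */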
7424 ix86_output_addr_vec_elt (FILE *file, int value)
7426 const char *directive = ASM_LONG;
7431 directive = ASM_QUAD;
7437 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
7441 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
7444 fprintf (file, "%s%s%d-%s%d\n",
7445 ASM_LONG, LPREFIX, value, LPREFIX, rel);
7446 else if (HAVE_AS_GOTOFF_IN_DATA)
7447 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
7449 else if (TARGET_MACHO)
7451 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
7452 machopic_output_function_base_name (file);
7453 fprintf(file, "\n");
7457 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
7458 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
7461 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate for the target. */
7465 ix86_expand_clear (rtx dest)
7469 /* We play register width games, which are only valid after reload. */
7470 if (!reload_completed)
7473 /* Avoid HImode and its attendant prefix byte. */
7474 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
7475 dest = gen_rtx_REG (SImode, REGNO (dest));
7477 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
7479 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
7480 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
7482 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
7483 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
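/* Illustrative result (added): after reload, clearing %eax emits the
   pattern for "xorl %eax, %eax" with an explicit flags clobber
   (hard register 17 is the flags register); otherwise the plain
   "movl $0, %eax" form is kept. */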
7489 /* X is an unchanging MEM. If it is a constant pool reference, return
7490 the constant pool rtx, else NULL. */
7493 maybe_get_pool_constant (rtx x)
7495 x = ix86_delegitimize_address (XEXP (x, 0));
7497 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
7498 return get_pool_constant (x);
7504 ix86_expand_move (enum machine_mode mode, rtx operands[])
7506 int strict = (reload_in_progress || reload_completed);
7508 enum tls_model model;
7513 if (GET_CODE (op1) == SYMBOL_REF)
7515 model = SYMBOL_REF_TLS_MODEL (op1);
7518 op1 = legitimize_tls_address (op1, model, true);
7519 op1 = force_operand (op1, op0);
7524 else if (GET_CODE (op1) == CONST
7525 && GET_CODE (XEXP (op1, 0)) == PLUS
7526 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
7528 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
7531 rtx addend = XEXP (XEXP (op1, 0), 1);
7532 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
7533 op1 = force_operand (op1, NULL);
7534 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
7535 op0, 1, OPTAB_DIRECT);
7541 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
7546 rtx temp = ((reload_in_progress
7547 || ((op0 && GET_CODE (op0) == REG)
7549 ? op0 : gen_reg_rtx (Pmode));
7550 op1 = machopic_indirect_data_reference (op1, temp);
7551 op1 = machopic_legitimize_pic_address (op1, mode,
7552 temp == op1 ? 0 : temp);
7554 else if (MACHOPIC_INDIRECT)
7555 op1 = machopic_indirect_data_reference (op1, 0);
7559 if (GET_CODE (op0) == MEM)
7560 op1 = force_reg (Pmode, op1);
7562 op1 = legitimize_address (op1, op1, Pmode);
7563 #endif /* TARGET_MACHO */
7567 if (GET_CODE (op0) == MEM
7568 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
7569 || !push_operand (op0, mode))
7570 && GET_CODE (op1) == MEM)
7571 op1 = force_reg (mode, op1);
7573 if (push_operand (op0, mode)
7574 && ! general_no_elim_operand (op1, mode))
7575 op1 = copy_to_mode_reg (mode, op1);
7577 /* In 64-bit compilation, force large constants into a register
7578 so that they can be CSEed. */
7579 if (TARGET_64BIT && mode == DImode
7580 && immediate_operand (op1, mode)
7581 && !x86_64_zext_immediate_operand (op1, VOIDmode)
7582 && !register_operand (op0, mode)
7583 && optimize && !reload_completed && !reload_in_progress)
7584 op1 = copy_to_mode_reg (mode, op1);
7586 if (FLOAT_MODE_P (mode))
7588 /* If we are loading a floating point constant to a register,
7589 force the value to memory now, since we'll get better code
7590 out of the back end. */
7594 else if (GET_CODE (op1) == CONST_DOUBLE)
7596 op1 = validize_mem (force_const_mem (mode, op1));
7597 if (!register_operand (op0, mode))
7599 rtx temp = gen_reg_rtx (mode);
7600 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
7601 emit_move_insn (op0, temp);
7608 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
7612 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
7614 rtx op0 = operands[0], op1 = operands[1];
7616 /* Force constants other than zero into memory. We do not know how
7617 the instructions used to build constants modify the upper 64 bits
7618 of the register; once we have that information, we may be able
7619 to handle some of them more efficiently. */
7620 if ((reload_in_progress | reload_completed) == 0
7621 && register_operand (op0, mode)
7622 && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
7623 op1 = validize_mem (force_const_mem (mode, op1));
7625 /* Make operand1 a register if it isn't already. */
7627 && !register_operand (op0, mode)
7628 && !register_operand (op1, mode))
7630 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
7634 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
7637 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
7638 straight to ix86_expand_vector_move. */
7641 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
7650 /* If we're optimizing for size, movups is the smallest. */
7653 op0 = gen_lowpart (V4SFmode, op0);
7654 op1 = gen_lowpart (V4SFmode, op1);
7655 emit_insn (gen_sse_movups (op0, op1));
7659 /* ??? If we have typed data, then it would appear that using
7660 movdqu is the only way to get unaligned data loaded with integer registers. */
7662 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7664 op0 = gen_lowpart (V16QImode, op0);
7665 op1 = gen_lowpart (V16QImode, op1);
7666 emit_insn (gen_sse2_movdqu (op0, op1));
7670 if (TARGET_SSE2 && mode == V2DFmode)
7674 /* When SSE registers are split into halves, we can avoid
7675 writing to the top half twice. */
7676 if (TARGET_SSE_SPLIT_REGS)
7678 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
7683 /* ??? Not sure about the best option for the Intel chips.
7684 The following would seem to satisfy; the register is
7685 entirely cleared, breaking the dependency chain. We
7686 then store to the upper half, with a dependency depth
7687 of one. A rumor has it that Intel recommends two movsd
7688 followed by an unpacklpd, but this is unconfirmed. And
7689 given that the dependency depth of the unpacklpd would
7690 still be one, I'm not sure why this would be better. */
7691 zero = CONST0_RTX (V2DFmode);
7694 m = adjust_address (op1, DFmode, 0);
7695 emit_insn (gen_sse2_loadlpd (op0, zero, m));
7696 m = adjust_address (op1, DFmode, 8);
7697 emit_insn (gen_sse2_loadhpd (op0, op0, m));
7701 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
7702 emit_move_insn (op0, CONST0_RTX (mode));
7704 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
7706 m = adjust_address (op1, V2SFmode, 0);
7707 emit_insn (gen_sse_loadlps (op0, op0, m));
7708 m = adjust_address (op1, V2SFmode, 8);
7709 emit_insn (gen_sse_loadhps (op0, op0, m));
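/* Illustrative emitted sequence for this fallback (added; assuming a
   misaligned V4SF source at (%eax)):
       xorps  %xmm0, %xmm0   # only with TARGET_SSE_PARTIAL_REG_DEPENDENCY
       movlps (%eax), %xmm0
       movhps 8(%eax), %xmm0  */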
7712 else if (MEM_P (op0))
7714 /* If we're optimizing for size, movups is the smallest. */
7717 op0 = gen_lowpart (V4SFmode, op0);
7718 op1 = gen_lowpart (V4SFmode, op1);
7719 emit_insn (gen_sse_movups (op0, op1));
7723 /* ??? Similar to above, only less clear because of
7724 "typeless stores". */
7725 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
7726 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7728 op0 = gen_lowpart (V16QImode, op0);
7729 op1 = gen_lowpart (V16QImode, op1);
7730 emit_insn (gen_sse2_movdqu (op0, op1));
7734 if (TARGET_SSE2 && mode == V2DFmode)
7736 m = adjust_address (op0, DFmode, 0);
7737 emit_insn (gen_sse2_storelpd (m, op1));
7738 m = adjust_address (op0, DFmode, 8);
7739 emit_insn (gen_sse2_storehpd (m, op1));
7743 if (mode != V4SFmode)
7744 op1 = gen_lowpart (V4SFmode, op1);
7745 m = adjust_address (op0, V2SFmode, 0);
7746 emit_insn (gen_sse_storelps (m, op1));
7747 m = adjust_address (op0, V2SFmode, 8);
7748 emit_insn (gen_sse_storehps (m, op1));
7756 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
7757 destination to use for the operation. If different from the true
7758 destination in operands[0], a copy operation will be required. */
7761 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
7764 int matching_memory;
7765 rtx src1, src2, dst;
7771 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
7772 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7773 && (rtx_equal_p (dst, src2)
7774 || immediate_operand (src1, mode)))
7781 /* If the destination is memory, and we do not have matching source
7782 operands, do things in registers. */
7783 matching_memory = 0;
7784 if (GET_CODE (dst) == MEM)
7786 if (rtx_equal_p (dst, src1))
7787 matching_memory = 1;
7788 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7789 && rtx_equal_p (dst, src2))
7790 matching_memory = 2;
7792 dst = gen_reg_rtx (mode);
7795 /* The two source operands cannot both be in memory. */
7796 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
7798 if (matching_memory != 2)
7799 src2 = force_reg (mode, src2);
7801 src1 = force_reg (mode, src1);
7804 /* If the operation is not commutative, source 1 cannot be a constant
7805 or non-matching memory. */
7806 if ((CONSTANT_P (src1)
7807 || (!matching_memory && GET_CODE (src1) == MEM))
7808 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7809 src1 = force_reg (mode, src1);
7811 /* If optimizing, copy to regs to improve CSE */
7812 if (optimize && ! no_new_pseudos)
7814 if (GET_CODE (dst) == MEM)
7815 dst = gen_reg_rtx (mode);
7816 if (GET_CODE (src1) == MEM)
7817 src1 = force_reg (mode, src1);
7818 if (GET_CODE (src2) == MEM)
7819 src2 = force_reg (mode, src2);
7822 operands[1] = src1;
7823 operands[2] = src2;
7827 /* Similarly, but assume that the destination has already been
7831 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
7832 enum machine_mode mode, rtx operands[])
7834 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
7835 gcc_assert (dst == operands[0]);
7838 /* Attempt to expand a binary operator. Make the expansion closer to the
7839 actual machine than just general_operand, which will allow 3 separate
7840 memory references (one output, two input) in a single insn. */
7843 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
7846 rtx src1, src2, dst, op, clob;
7848 dst = ix86_fixup_binary_operands (code, mode, operands);
7852 /* Emit the instruction. */
7854 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
7855 if (reload_in_progress)
7857 /* Reload doesn't know about the flags register, and doesn't know that
7858 it doesn't want to clobber it. We can only do this with PLUS. */
7865 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7866 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7869 /* Fix up the destination if needed. */
7870 if (dst != operands[0])
7871 emit_move_insn (operands[0], dst);
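/* Illustrative example (added): for "a = b + a" with a in memory, the
   commutativity swap in ix86_fixup_binary_operands produces a
   matching-memory form; for "a = b + c" with a in memory, dst becomes
   a fresh pseudo and the emit_move_insn above stores it back to a. */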
7874 /* Return TRUE or FALSE depending on whether the binary operator meets the
7875 appropriate constraints. */
7878 ix86_binary_operator_ok (enum rtx_code code,
7879 enum machine_mode mode ATTRIBUTE_UNUSED,
7882 /* The two source operands cannot both be in memory. */
7883 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
7885 /* If the operation is not commutative, source 1 cannot be a constant. */
7886 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7888 /* If the destination is memory, we must have a matching source operand. */
7889 if (GET_CODE (operands[0]) == MEM
7890 && ! (rtx_equal_p (operands[0], operands[1])
7891 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7892 && rtx_equal_p (operands[0], operands[2]))))
7894 /* If the operation is not commutative and source 1 is memory, we must
7895 have a matching destination. */
7896 if (GET_CODE (operands[1]) == MEM
7897 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
7898 && ! rtx_equal_p (operands[0], operands[1]))
7903 /* Attempt to expand a unary operator. Make the expansion closer to the
7904 actual machine than just general_operand, which will allow 2 separate
7905 memory references (one output, one input) in a single insn. */
7908 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
7911 int matching_memory;
7912 rtx src, dst, op, clob;
7917 /* If the destination is memory, and we do not have matching source
7918 operands, do things in registers. */
7919 matching_memory = 0;
7922 if (rtx_equal_p (dst, src))
7923 matching_memory = 1;
7925 dst = gen_reg_rtx (mode);
7928 /* When source operand is memory, destination must match. */
7929 if (MEM_P (src) && !matching_memory)
7930 src = force_reg (mode, src);
7932 /* If optimizing, copy to regs to improve CSE. */
7933 if (optimize && ! no_new_pseudos)
7935 if (GET_CODE (dst) == MEM)
7936 dst = gen_reg_rtx (mode);
7937 if (GET_CODE (src) == MEM)
7938 src = force_reg (mode, src);
7941 /* Emit the instruction. */
7943 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
7944 if (reload_in_progress || code == NOT)
7946 /* Reload doesn't know about the flags register, and doesn't know that
7947 it doesn't want to clobber it. */
7954 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7955 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7958 /* Fix up the destination if needed. */
7959 if (dst != operands[0])
7960 emit_move_insn (operands[0], dst);
7963 /* Return TRUE or FALSE depending on whether the unary operator meets the
7964 appropriate constraints. */
7967 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
7968 enum machine_mode mode ATTRIBUTE_UNUSED,
7969 rtx operands[2] ATTRIBUTE_UNUSED)
7971 /* If one of the operands is memory, source and destination must match. */
7972 if ((GET_CODE (operands[0]) == MEM
7973 || GET_CODE (operands[1]) == MEM)
7974 && ! rtx_equal_p (operands[0], operands[1]))
7979 /* Generate code for floating point ABS or NEG. */
7982 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
7985 rtx mask, set, use, clob, dst, src;
7986 bool matching_memory;
7987 bool use_sse = false;
7988 bool vector_mode = VECTOR_MODE_P (mode);
7989 enum machine_mode elt_mode = mode;
7990 enum machine_mode vec_mode = VOIDmode;
7994 elt_mode = GET_MODE_INNER (mode);
7998 if (TARGET_SSE_MATH)
8003 vec_mode = V4SFmode;
8005 else if (mode == DFmode && TARGET_SSE2)
8008 vec_mode = V2DFmode;
8012 /* NEG and ABS performed with SSE use bitwise mask operations.
8013 Create the appropriate mask now. */
8016 HOST_WIDE_INT hi, lo;
8020 /* Find the sign bit, sign extended to 2*HWI. */
8021 if (elt_mode == SFmode)
8022 lo = 0x80000000, hi = lo < 0;
8023 else if (HOST_BITS_PER_WIDE_INT >= 64)
8024 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
8026 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
8028 /* If we're looking for the absolute value, then we want the complement, since ABS is done with AND rather than XOR. */
8033 /* Force this value into the low part of a fp vector constant. */
8034 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
8035 mask = gen_lowpart (mode, mask);
8040 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
8041 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8045 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
8049 v = gen_rtvec (4, mask, mask, mask, mask);
8053 v = gen_rtvec (2, mask, mask);
8060 mask = gen_rtx_CONST_VECTOR (vec_mode, v);
8061 mask = force_reg (vec_mode, mask);
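/* Worked example (added): for SFmode NEG each mask lane is 0x80000000
   and the insn below computes dst = src XOR mask, flipping the sign
   bit; for ABS the complemented mask 0x7fffffff is used with AND,
   clearing the sign bit. */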
8065 /* When not using SSE, we don't use the mask, but prefer to keep the
8066 same general form of the insn pattern to reduce duplication when
8067 it comes time to split. */
8074 /* If the destination is memory, and we don't have matching source
8075 operands, do things in registers. */
8076 matching_memory = false;
8079 if (rtx_equal_p (dst, src) && (!optimize || no_new_pseudos))
8080 matching_memory = true;
8082 dst = gen_reg_rtx (mode);
8084 if (MEM_P (src) && !matching_memory)
8085 src = force_reg (mode, src);
8089 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
8090 set = gen_rtx_SET (VOIDmode, dst, set);
8095 set = gen_rtx_fmt_e (code, mode, src);
8096 set = gen_rtx_SET (VOIDmode, dst, set);
8097 use = gen_rtx_USE (VOIDmode, mask);
8098 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8099 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));
8102 if (dst != operands[0])
8103 emit_move_insn (operands[0], dst);
8106 /* Return TRUE or FALSE depending on whether the first SET in INSN
8107 has source and destination with matching CC modes, and whether the
8108 CC mode is at least as constrained as REQ_MODE. */
8111 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
8114 enum machine_mode set_mode;
8116 set = PATTERN (insn);
8117 if (GET_CODE (set) == PARALLEL)
8118 set = XVECEXP (set, 0, 0);
8119 if (GET_CODE (set) != SET)
8121 if (GET_CODE (SET_SRC (set)) != COMPARE)
8124 set_mode = GET_MODE (SET_DEST (set));
8128 if (req_mode != CCNOmode
8129 && (req_mode != CCmode
8130 || XEXP (SET_SRC (set), 1) != const0_rtx))
8134 if (req_mode == CCGCmode)
8138 if (req_mode == CCGOCmode || req_mode == CCNOmode)
8142 if (req_mode == CCZmode)
8152 return (GET_MODE (SET_SRC (set)) == set_mode);
8155 /* Generate insn patterns to do an integer compare of OPERANDS. */
8158 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
8160 enum machine_mode cmpmode;
8163 cmpmode = SELECT_CC_MODE (code, op0, op1);
8164 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
8166 /* This is very simple, but making the interface the same as in the
8167 FP case makes the rest of the code easier. */
8168 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
8169 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
8171 /* Return the test that should be put into the flags user, i.e.
8172 the bcc, scc, or cmov instruction. */
8173 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
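/* Illustrative example (added): for (EQ a b) this emits
   (set (reg:CCZ 17) (compare:CCZ a b)) and hands back
   (eq (reg:CCZ 17) (const_int 0)) for the bcc/scc/cmov user. */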
8176 /* Figure out whether to use ordered or unordered fp comparisons.
8177 Return the appropriate mode to use. */
8180 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
8182 /* ??? In order to make all comparisons reversible, we do all comparisons
8183 non-trapping when compiling for IEEE. Once gcc is able to distinguish
8184 between all forms of trapping and nontrapping comparisons, we can make inequality
8185 comparisons trapping again, since it results in better code when using
8186 FCOM based compares. */
8187 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
8191 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
8193 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8194 return ix86_fp_compare_mode (code);
8197 /* Only the zero flag is needed. */
8199 case NE: /* ZF!=0 */
8201 /* Codes needing carry flag. */
8202 case GEU: /* CF=0 */
8203 case GTU: /* CF=0 & ZF=0 */
8204 case LTU: /* CF=1 */
8205 case LEU: /* CF=1 | ZF=1 */
8207 /* Codes possibly doable only with sign flag when
8208 comparing against zero. */
8209 case GE: /* SF=OF or SF=0 */
8210 case LT: /* SF<>OF or SF=1 */
8211 if (op1 == const0_rtx)
8214 /* For other cases the carry flag is not required. */
8216 /* Codes doable only with the sign flag when comparing
8217 against zero, but we lack a jump instruction for it,
8218 so we need to use relational tests against the overflow
8219 flag, which thus needs to be zero. */
8220 case GT: /* ZF=0 & SF=OF */
8221 case LE: /* ZF=1 | SF<>OF */
8222 if (op1 == const0_rtx)
8226 /* The strcmp pattern does (use flags), and combine may ask us for a proper comparison. */
8235 /* Return the fixed registers used for condition codes. */
8238 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
8245 /* If two condition code modes are compatible, return a condition code
8246 mode which is compatible with both. Otherwise, return
8249 static enum machine_mode
8250 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
8255 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
8258 if ((m1 == CCGCmode && m2 == CCGOCmode)
8259 || (m1 == CCGOCmode && m2 == CCGCmode))
8287 /* These are only compatible with themselves, which we already checked above. */
8293 /* Return true if we should use an FCOMI instruction for this fp comparison. */
8296 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
8298 enum rtx_code swapped_code = swap_condition (code);
8299 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
8300 || (ix86_fp_comparison_cost (swapped_code)
8301 == ix86_fp_comparison_fcomi_cost (swapped_code)));
8304 /* Swap, force into registers, or otherwise massage the two operands
8305 to a fp comparison. The operands are updated in place; the new
8306 comparison code is returned. */
8308 static enum rtx_code
8309 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
8311 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
8312 rtx op0 = *pop0, op1 = *pop1;
8313 enum machine_mode op_mode = GET_MODE (op0);
8314 int is_sse = SSE_REG_P (op0) || SSE_REG_P (op1);
8316 /* All of the unordered compare instructions only work on registers.
8317 The same is true of the fcomi compare instructions. The same is
8318 true of the XFmode compare instructions if not comparing with
8319 zero (ftst insn is used in this case). */
8322 && (fpcmp_mode == CCFPUmode
8323 || (op_mode == XFmode
8324 && ! (standard_80387_constant_p (op0) == 1
8325 || standard_80387_constant_p (op1) == 1))
8326 || ix86_use_fcomi_compare (code)))
8328 op0 = force_reg (op_mode, op0);
8329 op1 = force_reg (op_mode, op1);
8333 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
8334 things around if they appear profitable, otherwise force op0 into a register. */
8337 if (standard_80387_constant_p (op0) == 0
8338 || (GET_CODE (op0) == MEM
8339 && ! (standard_80387_constant_p (op1) == 0
8340 || GET_CODE (op1) == MEM)))
8343 tmp = op0, op0 = op1, op1 = tmp;
8344 code = swap_condition (code);
8347 if (GET_CODE (op0) != REG)
8348 op0 = force_reg (op_mode, op0);
8350 if (CONSTANT_P (op1))
8352 int tmp = standard_80387_constant_p (op1);
8354 op1 = validize_mem (force_const_mem (op_mode, op1));
8358 op1 = force_reg (op_mode, op1);
8361 op1 = force_reg (op_mode, op1);
8365 /* Try to rearrange the comparison to make it cheaper. */
8366 if (ix86_fp_comparison_cost (code)
8367 > ix86_fp_comparison_cost (swap_condition (code))
8368 && (GET_CODE (op1) == REG || !no_new_pseudos))
8371 tmp = op0, op0 = op1, op1 = tmp;
8372 code = swap_condition (code);
8373 if (GET_CODE (op0) != REG)
8374 op0 = force_reg (op_mode, op0);
8382 /* Convert the comparison codes we use to represent an FP comparison to the integer
8383 code that will result in a proper branch. Return UNKNOWN if no such code is available. */
8387 ix86_fp_compare_code_to_integer (enum rtx_code code)
8416 /* Split comparison code CODE into comparisons we can do using branch
8417 instructions. BYPASS_CODE is the comparison code for the branch that will
8418 branch around FIRST_CODE and SECOND_CODE. If one of the branches
8419 is not required, its value is set to UNKNOWN.
8420 We never require more than two branches. */
8423 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
8424 enum rtx_code *first_code,
8425 enum rtx_code *second_code)
8428 *bypass_code = UNKNOWN;
8429 *second_code = UNKNOWN;
8431 /* The fcomi comparison sets flags as follows:
8441 case GT: /* GTU - CF=0 & ZF=0 */
8442 case GE: /* GEU - CF=0 */
8443 case ORDERED: /* PF=0 */
8444 case UNORDERED: /* PF=1 */
8445 case UNEQ: /* EQ - ZF=1 */
8446 case UNLT: /* LTU - CF=1 */
8447 case UNLE: /* LEU - CF=1 | ZF=1 */
8448 case LTGT: /* EQ - ZF=0 */
8450 case LT: /* LTU - CF=1 - fails on unordered */
8452 *bypass_code = UNORDERED;
8454 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
8456 *bypass_code = UNORDERED;
8458 case EQ: /* EQ - ZF=1 - fails on unordered */
8460 *bypass_code = UNORDERED;
8462 case NE: /* NE - ZF=0 - fails on unordered */
8464 *second_code = UNORDERED;
8466 case UNGE: /* GEU - CF=0 - fails on unordered */
8468 *second_code = UNORDERED;
8470 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
8472 *second_code = UNORDERED;
8477 if (!TARGET_IEEE_FP)
8479 *second_code = UNKNOWN;
8480 *bypass_code = UNKNOWN;
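/* Illustrative example (added): for EQ under TARGET_IEEE_FP the cases
   above produce *first_code = EQ and *bypass_code = UNORDERED, i.e.
   jump around the "je" when the operands are unordered (PF set). */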
8484 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
8485 All the following functions use the number of instructions as a cost metric.
8486 In the future this should be tweaked to compute bytes for optimize_size and
8487 to take into account the performance of various instructions on various CPUs. */
8489 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
8491 if (!TARGET_IEEE_FP)
8493 /* The cost of code output by ix86_expand_fp_compare. */
8521 /* Return cost of comparison done using fcomi operation.
8522 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8524 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
8526 enum rtx_code bypass_code, first_code, second_code;
8527 /* Return an arbitrarily high cost when the instruction is not supported; this
8528 prevents gcc from using it. */
8531 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8532 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
8535 /* Return cost of comparison done using sahf operation.
8536 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8538 ix86_fp_comparison_sahf_cost (enum rtx_code code)
8540 enum rtx_code bypass_code, first_code, second_code;
8541 /* Return an arbitrarily high cost when the instruction is not preferred; this
8542 prevents gcc from using it. */
8543 if (!TARGET_USE_SAHF && !optimize_size)
8545 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8546 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
8549 /* Compute cost of the comparison done using any method.
8550 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8552 ix86_fp_comparison_cost (enum rtx_code code)
8554 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
8557 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
8558 sahf_cost = ix86_fp_comparison_sahf_cost (code);
8560 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
8561 if (min > sahf_cost)
8563 if (min > fcomi_cost)
8568 /* Generate insn patterns to do a floating point compare of OPERANDS. */
8571 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
8572 rtx *second_test, rtx *bypass_test)
8574 enum machine_mode fpcmp_mode, intcmp_mode;
8576 int cost = ix86_fp_comparison_cost (code);
8577 enum rtx_code bypass_code, first_code, second_code;
8579 fpcmp_mode = ix86_fp_compare_mode (code);
8580 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
8583 *second_test = NULL_RTX;
8585 *bypass_test = NULL_RTX;
8587 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8589 /* Do fcomi/sahf based test when profitable. */
8590 if ((bypass_code == UNKNOWN || bypass_test)
8591 && (second_code == UNKNOWN || second_test)
8592 && ix86_fp_comparison_arithmetics_cost (code) > cost)
8596 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8597 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
8603 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8604 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8606 scratch = gen_reg_rtx (HImode);
8607 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
8608 emit_insn (gen_x86_sahf_1 (scratch));
8611 /* The FP codes work out to act like unsigned. */
8612 intcmp_mode = fpcmp_mode;
8614 if (bypass_code != UNKNOWN)
8615 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
8616 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8618 if (second_code != UNKNOWN)
8619 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
8620 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8625 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
8626 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8627 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8629 scratch = gen_reg_rtx (HImode);
8630 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
8632 /* In the unordered case, we have to check C2 for NaNs, which
8633 doesn't happen to work out to anything nice combination-wise.
8634 So do some bit twiddling on the value we've got in AH to come
8635 up with an appropriate set of condition codes. */
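/* Background note (added; standard x87 facts): fnstsw stores the FPU
   status word into AX, so AH holds C0 (0x01), C2 (0x04) and C3 (0x40).
   After a compare these act roughly like CF, PF and ZF, and all three
   are set for an unordered result -- hence the 0x45, 0x44, 0x40, 0x05
   and 0x01 masks used below. */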
8637 intcmp_mode = CCNOmode;
8642 if (code == GT || !TARGET_IEEE_FP)
8644 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8649 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8650 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8651 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
8652 intcmp_mode = CCmode;
8658 if (code == LT && TARGET_IEEE_FP)
8660 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8661 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
8662 intcmp_mode = CCmode;
8667 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
8673 if (code == GE || !TARGET_IEEE_FP)
8675 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
8680 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8681 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8688 if (code == LE && TARGET_IEEE_FP)
8690 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8691 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8692 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8693 intcmp_mode = CCmode;
8698 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8704 if (code == EQ && TARGET_IEEE_FP)
8706 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8707 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8708 intcmp_mode = CCmode;
8713 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8720 if (code == NE && TARGET_IEEE_FP)
8722 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8723 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8729 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8735 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8739 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8748 /* Return the test that should be put into the flags user, i.e.
8749 the bcc, scc, or cmov instruction. */
8750 return gen_rtx_fmt_ee (code, VOIDmode,
8751 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8756 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
8759 op0 = ix86_compare_op0;
8760 op1 = ix86_compare_op1;
8763 *second_test = NULL_RTX;
8765 *bypass_test = NULL_RTX;
8767 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8768 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
8769 second_test, bypass_test);
8771 ret = ix86_expand_int_compare (code, op0, op1);
8776 /* Return true if the CODE will result in a nontrivial jump sequence. */
8778 ix86_fp_jump_nontrivial_p (enum rtx_code code)
8780 enum rtx_code bypass_code, first_code, second_code;
8783 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8784 return bypass_code != UNKNOWN || second_code != UNKNOWN;
8788 ix86_expand_branch (enum rtx_code code, rtx label)
8792 switch (GET_MODE (ix86_compare_op0))
8798 tmp = ix86_expand_compare (code, NULL, NULL);
8799 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8800 gen_rtx_LABEL_REF (VOIDmode, label),
8802 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
8811 enum rtx_code bypass_code, first_code, second_code;
8813 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
8816 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8818 /* Check whether we will use the natural sequence with one jump. If
8819 so, we can expand the jump early. Otherwise delay expansion by
8820 creating a compound insn so as not to confuse the optimizers. */
8821 if (bypass_code == UNKNOWN && second_code == UNKNOWN
8824 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
8825 gen_rtx_LABEL_REF (VOIDmode, label),
8826 pc_rtx, NULL_RTX, NULL_RTX);
8830 tmp = gen_rtx_fmt_ee (code, VOIDmode,
8831 ix86_compare_op0, ix86_compare_op1);
8832 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8833 gen_rtx_LABEL_REF (VOIDmode, label),
8835 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
8837 use_fcomi = ix86_use_fcomi_compare (code);
8838 vec = rtvec_alloc (3 + !use_fcomi);
8839 RTVEC_ELT (vec, 0) = tmp;
8841 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
8843 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
8846 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
8848 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
8856 /* Expand DImode branch into multiple compare+branch. */
8858 rtx lo[2], hi[2], label2;
8859 enum rtx_code code1, code2, code3;
8861 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
8863 tmp = ix86_compare_op0;
8864 ix86_compare_op0 = ix86_compare_op1;
8865 ix86_compare_op1 = tmp;
8866 code = swap_condition (code);
8868 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
8869 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
8871 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
8872 avoid two branches. This costs one extra insn, so disable when
8873 optimizing for size. */
8875 if ((code == EQ || code == NE)
8877 || hi[1] == const0_rtx || lo[1] == const0_rtx))
8882 if (hi[1] != const0_rtx)
8883 xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
8884 NULL_RTX, 0, OPTAB_WIDEN);
8887 if (lo[1] != const0_rtx)
8888 xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
8889 NULL_RTX, 0, OPTAB_WIDEN);
8891 tmp = expand_binop (SImode, ior_optab, xor1, xor0,
8892 NULL_RTX, 0, OPTAB_WIDEN);
8894 ix86_compare_op0 = tmp;
8895 ix86_compare_op1 = const0_rtx;
8896 ix86_expand_branch (code, label);
8900 /* Otherwise, if we are doing a less-than or greater-or-equal-than
8901 comparison, op1 is a constant, and the low word is zero, then we can just
8902 examine the high word. */
8904 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
8907 case LT: case LTU: case GE: case GEU:
8908 ix86_compare_op0 = hi[0];
8909 ix86_compare_op1 = hi[1];
8910 ix86_expand_branch (code, label);
8916 /* Otherwise, we need two or three jumps. */
8918 label2 = gen_label_rtx ();
8921 code2 = swap_condition (code);
8922 code3 = unsigned_condition (code);
8926 case LT: case GT: case LTU: case GTU:
8929 case LE: code1 = LT; code2 = GT; break;
8930 case GE: code1 = GT; code2 = LT; break;
8931 case LEU: code1 = LTU; code2 = GTU; break;
8932 case GEU: code1 = GTU; code2 = LTU; break;
8934 case EQ: code1 = UNKNOWN; code2 = NE; break;
8935 case NE: code2 = UNKNOWN; break;
8943 * if (hi(a) < hi(b)) goto true;
8944 * if (hi(a) > hi(b)) goto false;
8945 * if (lo(a) < lo(b)) goto true;
8949 ix86_compare_op0 = hi[0];
8950 ix86_compare_op1 = hi[1];
8952 if (code1 != UNKNOWN)
8953 ix86_expand_branch (code1, label);
8954 if (code2 != UNKNOWN)
8955 ix86_expand_branch (code2, label2);
8957 ix86_compare_op0 = lo[0];
8958 ix86_compare_op1 = lo[1];
8959 ix86_expand_branch (code3, label);
8961 if (code2 != UNKNOWN)
8962 emit_label (label2);
8971 /* Split a branch based on a floating point condition. */
8973 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
8974 rtx target1, rtx target2, rtx tmp, rtx pushed)
8977 rtx label = NULL_RTX;
8979 int bypass_probability = -1, second_probability = -1, probability = -1;
8982 if (target2 != pc_rtx)
8985 code = reverse_condition_maybe_unordered (code);
8990 condition = ix86_expand_fp_compare (code, op1, op2,
8991 tmp, &second, &bypass);
8993 /* Remove pushed operand from stack. */
8995 ix86_free_from_memory (GET_MODE (pushed));
8997 if (split_branch_probability >= 0)
8999 /* Distribute the probabilities across the jumps.
9000 Assume that BYPASS and SECOND always test for UNORDERED. */
9002 probability = split_branch_probability;
9004 /* A value of 1 is low enough that the probability does not need
9005 to be updated. Later we may run some experiments and see
9006 if unordered values are more frequent in practice. */
9008 bypass_probability = 1;
9010 second_probability = 1;
9012 if (bypass != NULL_RTX)
9014 label = gen_label_rtx ();
9015 i = emit_jump_insn (gen_rtx_SET
9017 gen_rtx_IF_THEN_ELSE (VOIDmode,
9019 gen_rtx_LABEL_REF (VOIDmode,
9022 if (bypass_probability >= 0)
9024 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9025 GEN_INT (bypass_probability),
9028 i = emit_jump_insn (gen_rtx_SET
9030 gen_rtx_IF_THEN_ELSE (VOIDmode,
9031 condition, target1, target2)));
9032 if (probability >= 0)
9034 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9035 GEN_INT (probability),
9037 if (second != NULL_RTX)
9039 i = emit_jump_insn (gen_rtx_SET
9041 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
9043 if (second_probability >= 0)
9045 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9046 GEN_INT (second_probability),
9049 if (label != NULL_RTX)
9054 ix86_expand_setcc (enum rtx_code code, rtx dest)
9056 rtx ret, tmp, tmpreg, equiv;
9057 rtx second_test, bypass_test;
9059 if (GET_MODE (ix86_compare_op0) == DImode
9061 return 0; /* FAIL */
9063 if (GET_MODE (dest) != QImode)
9066 ret = ix86_expand_compare (code, &second_test, &bypass_test);
9067 PUT_MODE (ret, QImode);
9072 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
9073 if (bypass_test || second_test)
9075 rtx test = second_test;
9077 rtx tmp2 = gen_reg_rtx (QImode);
9084 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
9086 PUT_MODE (test, QImode);
9087 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
9090 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
9092 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
9095 /* Attach a REG_EQUAL note describing the comparison result. */
9096 equiv = simplify_gen_relational (code, QImode,
9097 GET_MODE (ix86_compare_op0),
9098 ix86_compare_op0, ix86_compare_op1);
9099 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
9101 return 1; /* DONE */
9104 /* Expand a comparison setting or clearing the carry flag. Return true when
9105 successful, and set *pop to the comparison for the operation. */
9107 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
9109 enum machine_mode mode =
9110 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
9112 /* Do not handle DImode compares that go trought special path. Also we can't
9113 deal with FP compares yet. This is possible to add. */
9114 if ((mode == DImode && !TARGET_64BIT))
9116 if (FLOAT_MODE_P (mode))
9118 rtx second_test = NULL, bypass_test = NULL;
9119 rtx compare_op, compare_seq;
      /* Shortcut: the following common codes never translate into
         carry-flag compares.  */
      if (code == EQ || code == NE || code == UNEQ || code == LTGT
          || code == ORDERED || code == UNORDERED)
      /* These comparisons require the zero flag; swap operands so they
         won't.  */
      if ((code == GT || code == UNLE || code == LE || code == UNGT)
          code = swap_condition (code);
      /* Try to expand the comparison and verify that we end up with a
         carry-flag-based comparison.  This fails to be true only when we
         decide to expand the comparison using arithmetic, which is not a
         common scenario.  */
      compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
                                           &second_test, &bypass_test);
      compare_seq = get_insns ();
      if (second_test || bypass_test)
      if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
          || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
        code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
        code = GET_CODE (compare_op);
      if (code != LTU && code != GEU)
      emit_insn (compare_seq);
  if (!INTEGRAL_MODE_P (mode))
      /* Convert a==0 into (unsigned)a<1.  */
      if (op1 != const0_rtx)
      code = (code == EQ ? LTU : GEU);
      /* Convert a>b into b<a or a>=b+1.  */
      if (GET_CODE (op1) == CONST_INT)
          op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
          /* Bail out on overflow.  We could still swap the operands, but
             that would force loading the constant into a register.  */
          if (op1 == const0_rtx
              || !x86_64_immediate_operand (op1, GET_MODE (op1)))
          code = (code == GTU ? GEU : LTU);
        code = (code == GTU ? LTU : GEU);
      /* Convert a>=0 into (unsigned)a<0x80000000.  */
      if (mode == DImode || op1 != const0_rtx)
      op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
      code = (code == LT ? GEU : LTU);
      if (mode == DImode || op1 != constm1_rtx)
      op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
      code = (code == LE ? GEU : LTU);
  /* Swapping operands may cause a constant to appear as the first
     operand.  */
  if (!nonimmediate_operand (op0, VOIDmode))
      op0 = force_reg (mode, op0);
  ix86_compare_op0 = op0;
  ix86_compare_op1 = op1;
  *pop = ix86_expand_compare (code, NULL, NULL);
  if (GET_CODE (*pop) != LTU && GET_CODE (*pop) != GEU)
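  /* Worked example of the conversions above (illustrative): SImode
     "a > 5" (GTU) is rewritten to "a >= 6" (GEU), which a single cmpl
     leaves in the carry flag; "a >= 0" (signed) becomes the unsigned
     test a < 0x80000000, again just a cmpl plus the carry flag.  */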
ix86_expand_int_movcc (rtx operands[])
  enum rtx_code code = GET_CODE (operands[1]), compare_code;
  rtx compare_seq, compare_op;
  rtx second_test, bypass_test;
  enum machine_mode mode = GET_MODE (operands[0]);
  bool sign_bit_compare_p = false;
  compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
  compare_seq = get_insns ();
  compare_code = GET_CODE (compare_op);
  if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
      || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
    sign_bit_compare_p = true;
  /* Don't attempt mode expansion here -- if we had to expand 5 or 6
     HImode insns, we'd be swallowed in word prefix ops.  */
  if ((mode != HImode || TARGET_FAST_PREFIX)
      && (mode != DImode || TARGET_64BIT)
      && GET_CODE (operands[2]) == CONST_INT
      && GET_CODE (operands[3]) == CONST_INT)
      rtx out = operands[0];
      HOST_WIDE_INT ct = INTVAL (operands[2]);
      HOST_WIDE_INT cf = INTVAL (operands[3]);
      /* Sign bit compares are better done using shifts than by using the
         sbb instruction.  */
      if (sign_bit_compare_p
          || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
                                             ix86_compare_op1, &compare_op))
          /* Detect overlap between destination and compare sources.  */
          if (!sign_bit_compare_p)
              compare_code = GET_CODE (compare_op);
              if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
                  || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
                compare_code = ix86_fp_compare_code_to_integer (compare_code);
              /* To simplify the rest of the code, restrict to the GEU
                 case.  */
              if (compare_code == LTU)
                  HOST_WIDE_INT tmp = ct;
                  compare_code = reverse_condition (compare_code);
                  code = reverse_condition (code);
                    PUT_CODE (compare_op,
                              reverse_condition_maybe_unordered
                              (GET_CODE (compare_op)));
                    PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
          if (reg_overlap_mentioned_p (out, ix86_compare_op0)
              || reg_overlap_mentioned_p (out, ix86_compare_op1))
            tmp = gen_reg_rtx (mode);
            emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
            emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
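          /* Illustrative note: the movcc_0_m1 patterns are the classic
             sbb idiom.  After a compare that leaves the condition in the
             carry flag,
                 sbbl %eax, %eax
             computes %eax - %eax - CF, i.e. -1 when CF is set and 0
             otherwise, producing an all-ones/all-zeros mask with no
             branch.  */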
          if (code == GT || code == GE)
            code = reverse_condition (code);
              HOST_WIDE_INT tmp = ct;
          tmp = emit_store_flag (tmp, code, ix86_compare_op0,
                                 ix86_compare_op1, VOIDmode, 0, -1);
              tmp = expand_simple_binop (mode, PLUS,
                                         copy_rtx (tmp), 1, OPTAB_DIRECT);
              tmp = expand_simple_binop (mode, IOR,
                                         copy_rtx (tmp), 1, OPTAB_DIRECT);
          else if (diff == -1 && ct)
                tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
              tmp = expand_simple_binop (mode, PLUS,
                                         copy_rtx (tmp), GEN_INT (cf),
                                         copy_rtx (tmp), 1, OPTAB_DIRECT);
               *   andl cf - ct, dest
                tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
              tmp = expand_simple_binop (mode, AND,
                                         gen_int_mode (cf - ct, mode),
                                         copy_rtx (tmp), 1, OPTAB_DIRECT);
              tmp = expand_simple_binop (mode, PLUS,
                                         copy_rtx (tmp), GEN_INT (ct),
                                         copy_rtx (tmp), 1, OPTAB_DIRECT);
          if (!rtx_equal_p (tmp, out))
            emit_move_insn (copy_rtx (out), copy_rtx (tmp));
          return 1; /* DONE */
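          /* Worked example (assumed constants ct = 7, cf = 3, purely
             illustrative): with the -1/0 mask from sbb in a register,
                 andl $-4, %eax      # cf - ct == -4
                 addl $7, %eax       # ct
             maps -1 -> 3 and 0 -> 7, selecting either constant without a
             branch; which constant lands on the "true" side depends on
             the condition reversals performed above.  */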
              tmp = ct, ct = cf, cf = tmp;
              if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
                  /* We may be reversing an unordered compare to a normal
                     compare, which is not valid in general (we may convert
                     a non-trapping condition into a trapping one); however,
                     on i386 we currently emit all comparisons unordered.  */
                  compare_code = reverse_condition_maybe_unordered (compare_code);
                  code = reverse_condition_maybe_unordered (code);
                  compare_code = reverse_condition (compare_code);
                  code = reverse_condition (code);
      compare_code = UNKNOWN;
      if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
          && GET_CODE (ix86_compare_op1) == CONST_INT)
          if (ix86_compare_op1 == const0_rtx
              && (code == LT || code == GE))
            compare_code = code;
          else if (ix86_compare_op1 == constm1_rtx)
              else if (code == GT)
      /* Optimize dest = (op0 < 0) ? -1 : cf.  */
      if (compare_code != UNKNOWN
          && GET_MODE (ix86_compare_op0) == GET_MODE (out)
          && (cf == -1 || ct == -1))
          /* If the LEA code below could be used, only optimize if it
             results in a two-insn sequence.  */
          if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
                 || diff == 3 || diff == 5 || diff == 9)
              || (compare_code == LT && ct == -1)
              || (compare_code == GE && cf == -1))
               * notl op1 (if necessary)
              code = reverse_condition (code);
              out = emit_store_flag (out, code, ix86_compare_op0,
                                     ix86_compare_op1, VOIDmode, 0, -1);
              out = expand_simple_binop (mode, IOR,
                                         out, 1, OPTAB_DIRECT);
              if (out != operands[0])
                emit_move_insn (operands[0], out);
              return 1; /* DONE */
      if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
           || diff == 3 || diff == 5 || diff == 9)
          && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
              || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
           *   lea cf(dest*(ct-cf)),dest
           * This also catches the degenerate setcc-only case.
          out = emit_store_flag (out, code, ix86_compare_op0,
                                 ix86_compare_op1, VOIDmode, 0, 1);
          /* On x86_64 the lea instruction operates on Pmode, so we need
             the arithmetic done in the proper mode to match.  */
            tmp = copy_rtx (out);
              out1 = copy_rtx (out);
              tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
                tmp = gen_rtx_PLUS (mode, tmp, out1);
            tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
          if (!rtx_equal_p (tmp, out))
              out = force_operand (tmp, copy_rtx (out));
            emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
          if (!rtx_equal_p (out, operands[0]))
            emit_move_insn (operands[0], copy_rtx (out));
          return 1; /* DONE */
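          /* Illustrative example (assumed constants): for
             dest = cond ? 15 : 10 we have diff == 5, so the setcc leaves
             0/1 in dest and a single
                 leal 10(%eax,%eax,4), %eax
             maps 0 -> 10 and 1 -> 15, i.e. the "lea cf(dest*(ct-cf)),dest"
             case sketched above.  */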
       * General case:                  Jumpful:
       *   xorl dest,dest               cmpl op1, op2
       *   cmpl op1, op2                movl ct, dest
       *   decl dest                    movl cf, dest
       *   andl (cf-ct),dest            1:
       *
       * This is reasonably steep, but branch mispredict costs are
       * high on modern cpus, so consider failing only if optimizing
       * for space.
      if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
          && BRANCH_COST >= 2)
          if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
            /* We may be reversing an unordered compare to a normal
               compare, which is not valid in general (we may convert a
               non-trapping condition into a trapping one); however, on
               i386 we currently emit all comparisons unordered.  */
            code = reverse_condition_maybe_unordered (code);
              code = reverse_condition (code);
              if (compare_code != UNKNOWN)
                compare_code = reverse_condition (compare_code);
          if (compare_code != UNKNOWN)
              /* notl op1 (if needed)

                 For x < 0 (resp. x <= -1) there will be no notl,
                 so if possible swap the constants to get rid of the
                 complement.
                 True/false will be -1/0 while the code below (store flag
                 followed by decrement) is 0/-1, so the constants need
                 to be exchanged once more.  */
              if (compare_code == GE || !cf)
                  code = reverse_condition (code);
                  HOST_WIDE_INT tmp = cf;
              out = emit_store_flag (out, code, ix86_compare_op0,
                                     ix86_compare_op1, VOIDmode, 0, -1);
              out = emit_store_flag (out, code, ix86_compare_op0,
                                     ix86_compare_op1, VOIDmode, 0, 1);
              out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
                                         copy_rtx (out), 1, OPTAB_DIRECT);
          out = expand_simple_binop (mode, AND, copy_rtx (out),
                                     gen_int_mode (cf - ct, mode),
                                     copy_rtx (out), 1, OPTAB_DIRECT);
          out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
                                     copy_rtx (out), 1, OPTAB_DIRECT);
          if (!rtx_equal_p (out, operands[0]))
            emit_move_insn (operands[0], copy_rtx (out));
          return 1; /* DONE */
  if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
      /* Try a few more things with specific constants and a variable.  */
      rtx var, orig_out, out, tmp;
      if (BRANCH_COST <= 2)
        return 0; /* FAIL */
      /* If one of the two operands is an interesting constant, load a
         constant with the above and mask it in with a logical operation.  */
      if (GET_CODE (operands[2]) == CONST_INT)
          if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
            operands[3] = constm1_rtx, op = and_optab;
          else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
            operands[3] = const0_rtx, op = ior_optab;
            return 0; /* FAIL */
      else if (GET_CODE (operands[3]) == CONST_INT)
          if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
            operands[2] = constm1_rtx, op = and_optab;
          else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
            operands[2] = const0_rtx, op = ior_optab;
            return 0; /* FAIL */
        return 0; /* FAIL */
      orig_out = operands[0];
      tmp = gen_reg_rtx (mode);
      /* Recurse to get the constant loaded.  */
      if (ix86_expand_int_movcc (operands) == 0)
        return 0; /* FAIL */
      /* Mask in the interesting variable.  */
      out = expand_binop (mode, op, var, tmp, orig_out, 0,
      if (!rtx_equal_p (out, orig_out))
        emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
      return 1; /* DONE */
   * For comparison with above,
  if (! nonimmediate_operand (operands[2], mode))
    operands[2] = force_reg (mode, operands[2]);
  if (! nonimmediate_operand (operands[3], mode))
    operands[3] = force_reg (mode, operands[3]);
  if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
      rtx tmp = gen_reg_rtx (mode);
      emit_move_insn (tmp, operands[3]);
  if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
      rtx tmp = gen_reg_rtx (mode);
      emit_move_insn (tmp, operands[2]);
  if (! register_operand (operands[2], VOIDmode)
          || ! register_operand (operands[3], VOIDmode)))
    operands[2] = force_reg (mode, operands[2]);
      && ! register_operand (operands[3], VOIDmode))
    operands[3] = force_reg (mode, operands[3]);
  emit_insn (compare_seq);
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                          gen_rtx_IF_THEN_ELSE (mode,
                                                compare_op, operands[2],
    emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
                            gen_rtx_IF_THEN_ELSE (mode,
                                                  copy_rtx (operands[3]),
                                                  copy_rtx (operands[0]))));
    emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
                            gen_rtx_IF_THEN_ELSE (mode,
                                                  copy_rtx (operands[2]),
                                                  copy_rtx (operands[0]))));
  return 1; /* DONE */
ix86_expand_fp_movcc (rtx operands[])
  rtx compare_op, second_test, bypass_test;
  /* For SF/DFmode conditional moves based on comparisons
     in the same mode, we may want to use SSE min/max instructions.  */
  if (((TARGET_SSE_MATH && GET_MODE (operands[0]) == SFmode)
       || (TARGET_SSE2 && TARGET_SSE_MATH && GET_MODE (operands[0]) == DFmode))
      && GET_MODE (ix86_compare_op0) == GET_MODE (operands[0])
      /* The SSE comparisons do not support the LTGT/UNEQ pair.  */
          || (GET_CODE (operands[1]) != LTGT && GET_CODE (operands[1]) != UNEQ))
      /* We may be called from the post-reload splitter.  */
      && (!REG_P (operands[0])
          || SSE_REG_P (operands[0])
          || REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
      rtx op0 = ix86_compare_op0, op1 = ix86_compare_op1;
      code = GET_CODE (operands[1]);
      /* See if we have a (cross) match between the comparison operands
         and the conditional move operands.  */
      if (rtx_equal_p (operands[2], op1))
          code = reverse_condition_maybe_unordered (code);
      if (rtx_equal_p (operands[2], op0) && rtx_equal_p (operands[3], op1))
          /* Check for a min operation.  */
          if (code == LT || code == UNLE)
              operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
              if (memory_operand (op0, VOIDmode))
                op0 = force_reg (GET_MODE (operands[0]), op0);
              if (GET_MODE (operands[0]) == SFmode)
                emit_insn (gen_minsf3 (operands[0], op0, op1));
                emit_insn (gen_mindf3 (operands[0], op0, op1));
          /* Check for a max operation.  */
          if (code == GT || code == UNGE)
              operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
              if (memory_operand (op0, VOIDmode))
                op0 = force_reg (GET_MODE (operands[0]), op0);
              if (GET_MODE (operands[0]) == SFmode)
                emit_insn (gen_maxsf3 (operands[0], op0, op1));
                emit_insn (gen_maxdf3 (operands[0], op0, op1));
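          /* Illustrative example: with SSE math enabled,
             "x = a < b ? a : b" in SFmode matches the LT min case above
             and can be emitted as a single
                 minss %xmm1, %xmm0
             modulo operand ordering and the IEEE-mode caveats handled
             here.  */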
      /* Manage the condition to be sse_comparison_operator.  If we are in
         non-IEEE mode, try to canonicalize the destination operand to be
         first in the comparison; this helps reload to avoid extra
         moves.  */
      if (!sse_comparison_operator (operands[1], VOIDmode)
          || (rtx_equal_p (operands[0], ix86_compare_op1) && !TARGET_IEEE_FP))
          rtx tmp = ix86_compare_op0;
          ix86_compare_op0 = ix86_compare_op1;
          ix86_compare_op1 = tmp;
          operands[1] = gen_rtx_fmt_ee (swap_condition (GET_CODE (operands[1])),
                                        VOIDmode, ix86_compare_op0,
      /* Similarly try to manage the result to be the first operand of the
         conditional move.  We also don't support the NE comparison on SSE,
         so try to avoid it.  */
      if ((rtx_equal_p (operands[0], operands[3])
           && (!TARGET_IEEE_FP || GET_CODE (operands[1]) != EQ))
          || (GET_CODE (operands[1]) == NE && TARGET_IEEE_FP))
          rtx tmp = operands[2];
          operands[2] = operands[3];
          operands[1] = gen_rtx_fmt_ee (reverse_condition_maybe_unordered
                                        (GET_CODE (operands[1])),
                                        VOIDmode, ix86_compare_op0,
      if (GET_MODE (operands[0]) == SFmode)
        emit_insn (gen_sse_movsfcc (operands[0], operands[1],
                                    operands[2], operands[3],
                                    ix86_compare_op0, ix86_compare_op1));
        emit_insn (gen_sse_movdfcc (operands[0], operands[1],
                                    operands[2], operands[3],
                                    ix86_compare_op0, ix86_compare_op1));
  /* The floating point conditional move instructions don't directly
     support conditions resulting from a signed integer comparison.  */
  code = GET_CODE (operands[1]);
  compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
  if (!fcmov_comparison_operator (compare_op, VOIDmode))
      if (second_test != NULL || bypass_test != NULL)
      tmp = gen_reg_rtx (QImode);
      ix86_expand_setcc (code, tmp);
      ix86_compare_op0 = tmp;
      ix86_compare_op1 = const0_rtx;
      compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
  if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
      tmp = gen_reg_rtx (GET_MODE (operands[0]));
      emit_move_insn (tmp, operands[3]);
  if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
      tmp = gen_reg_rtx (GET_MODE (operands[0]));
      emit_move_insn (tmp, operands[2]);
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                          gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                          gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                          gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
/* Expand a conditional increment or decrement using adc/sbb instructions.
   The default case using setcc followed by the conditional move can be
   done by generic code.  */
ix86_expand_int_addcc (rtx operands[])
  enum rtx_code code = GET_CODE (operands[1]);
  rtx val = const0_rtx;
  enum machine_mode mode = GET_MODE (operands[0]);
  if (operands[3] != const1_rtx
      && operands[3] != constm1_rtx)
  if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
                                       ix86_compare_op1, &compare_op))
  code = GET_CODE (compare_op);
  if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
      || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
      code = ix86_fp_compare_code_to_integer (code);
        PUT_CODE (compare_op,
                  reverse_condition_maybe_unordered
                  (GET_CODE (compare_op)));
        PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
  PUT_MODE (compare_op, mode);
  /* Construct either the adc or the sbb insn.  */
  if ((code == LTU) == (operands[3] == constm1_rtx))
      switch (GET_MODE (operands[0]))
          emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
          emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
          emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
          emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
      switch (GET_MODE (operands[0]))
          emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
          emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
          emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
          emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
  return 1; /* DONE */
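/* Illustrative example (a sketch): "c += (a < b);" with unsigned
   operands can take this path and expand to
       cmpl %ebx, %eax
       adcl $0, %ecx
   The compare leaves the condition in CF and the adc folds the
   conditional increment into a single instruction; the sbb variants
   handle the decrement cases symmetrically.  */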
/* Split operands 0 and 1 into SImode parts.  Similar to split_di, but
   works for floating point parameters and non-offsettable memories.
   For pushes, it returns just stack offsets; the values will be saved
   in the right order.  At most three parts are generated.  */
ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
    size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
    size = (GET_MODE_SIZE (mode) + 4) / 8;
  if (GET_CODE (operand) == REG && MMX_REGNO_P (REGNO (operand)))
  if (size < 2 || size > 3)
  /* Optimize constant pool references into immediates.  This is used by
     FP moves, which force all constants to memory to allow combining.  */
  if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
      rtx tmp = maybe_get_pool_constant (operand);
  if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
      /* The only non-offsettable memories we handle are pushes.  */
      if (! push_operand (operand, VOIDmode))
      operand = copy_rtx (operand);
      PUT_MODE (operand, Pmode);
      parts[0] = parts[1] = parts[2] = operand;
  else if (!TARGET_64BIT)
      if (mode == DImode)
        split_di (&operand, 1, &parts[0], &parts[1]);
          if (REG_P (operand))
              if (!reload_completed)
              parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
              parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
                parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
          else if (offsettable_memref_p (operand))
              operand = adjust_address (operand, SImode, 0);
              parts[0] = operand;
              parts[1] = adjust_address (operand, SImode, 4);
                parts[2] = adjust_address (operand, SImode, 8);
          else if (GET_CODE (operand) == CONST_DOUBLE)
              REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
                  REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
                  parts[2] = gen_int_mode (l[2], SImode);
                  REAL_VALUE_TO_TARGET_DOUBLE (r, l);
              parts[1] = gen_int_mode (l[1], SImode);
              parts[0] = gen_int_mode (l[0], SImode);
      if (mode == TImode)
        split_ti (&operand, 1, &parts[0], &parts[1]);
      if (mode == XFmode || mode == TFmode)
          enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
          if (REG_P (operand))
              if (!reload_completed)
              parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
              parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
          else if (offsettable_memref_p (operand))
              operand = adjust_address (operand, DImode, 0);
              parts[0] = operand;
              parts[1] = adjust_address (operand, upper_mode, 8);
          else if (GET_CODE (operand) == CONST_DOUBLE)
              REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
              real_to_target (l, &r, mode);
              /* Do not shift by 32 to avoid a warning on 32-bit hosts.  */
              if (HOST_BITS_PER_WIDE_INT >= 64)
                    ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
                     + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
                parts[0] = immed_double_const (l[0], l[1], DImode);
              if (upper_mode == SImode)
                parts[1] = gen_int_mode (l[2], SImode);
              else if (HOST_BITS_PER_WIDE_INT >= 64)
                    ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
                     + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
                parts[1] = immed_double_const (l[2], l[3], DImode);
/* Emit insns to perform a move or push of DI, DF, and XF values.
   Return false when normal moves are needed; true when all required
   insns have been emitted.  Operands 2-4 contain the input values
   in the correct order; operands 5-7 contain the output values.  */
ix86_split_long_move (rtx operands[])
  int collisions = 0;
  enum machine_mode mode = GET_MODE (operands[0]);
  /* The DFmode expanders may ask us to move a double.
     For a 64-bit target this is a single move.  By hiding that fact
     here we simplify the i386.md splitters.  */
  if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
      /* Optimize constant pool references into immediates.  This is used
         by FP moves, which force all constants to memory to allow
         combining.  */
      if (GET_CODE (operands[1]) == MEM
          && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
        operands[1] = get_pool_constant (XEXP (operands[1], 0));
      if (push_operand (operands[0], VOIDmode))
          operands[0] = copy_rtx (operands[0]);
          PUT_MODE (operands[0], Pmode);
          operands[0] = gen_lowpart (DImode, operands[0]);
          operands[1] = gen_lowpart (DImode, operands[1]);
          emit_move_insn (operands[0], operands[1]);
  /* The only non-offsettable memory we handle is the push.  */
  if (push_operand (operands[0], VOIDmode))
  else if (GET_CODE (operands[0]) == MEM
           && ! offsettable_memref_p (operands[0]))
  nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
  ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
  /* When emitting a push, take care of source operands on the stack.  */
  if (push && GET_CODE (operands[1]) == MEM
      && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
        part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
                                     XEXP (part[1][2], 0));
      part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
                                   XEXP (part[1][1], 0));
  /* We need to do the copy in the right order in case an address register
     of the source overlaps the destination.  */
  if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
      if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
      if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
          && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
      /* A collision in the middle part can be handled by reordering.  */
      if (collisions == 1 && nparts == 3
          && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
          tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
          tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
      /* If there are more collisions, we can't handle them by reordering.
         Do an lea to the last part and use only one colliding move.  */
      else if (collisions > 1)
          base = part[0][nparts - 1];
          /* Handle the case when the last part isn't valid for lea.
             This happens in 64-bit mode storing the 12-byte XFmode.  */
          if (GET_MODE (base) != Pmode)
            base = gen_rtx_REG (Pmode, REGNO (base));
          emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
          part[1][0] = replace_equiv_address (part[1][0], base);
          part[1][1] = replace_equiv_address (part[1][1],
                                              plus_constant (base, UNITS_PER_WORD));
            part[1][2] = replace_equiv_address (part[1][2],
                                                plus_constant (base, 8));
          if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
            emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
          emit_move_insn (part[0][2], part[1][2]);
          /* In 64-bit mode we don't have a 32-bit push available.  If this
             is a register, that is OK: we will just use the larger
             counterpart.  We also retype memories; these come from the
             attempt to avoid a REX prefix when moving the second half of a
             TFmode value.  */
          if (GET_MODE (part[1][1]) == SImode)
              if (GET_CODE (part[1][1]) == MEM)
                part[1][1] = adjust_address (part[1][1], DImode, 0);
              else if (REG_P (part[1][1]))
                part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
              if (GET_MODE (part[1][0]) == SImode)
                part[1][0] = part[1][1];
      emit_move_insn (part[0][1], part[1][1]);
      emit_move_insn (part[0][0], part[1][0]);
  /* Choose the correct order so we do not overwrite the source before it
     is copied.  */
  if ((REG_P (part[0][0])
       && REG_P (part[1][1])
       && (REGNO (part[0][0]) == REGNO (part[1][1])
               && REGNO (part[0][0]) == REGNO (part[1][2]))))
          && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
          operands[2] = part[0][2];
          operands[3] = part[0][1];
          operands[4] = part[0][0];
          operands[5] = part[1][2];
          operands[6] = part[1][1];
          operands[7] = part[1][0];
          operands[2] = part[0][1];
          operands[3] = part[0][0];
          operands[5] = part[1][1];
          operands[6] = part[1][0];
          operands[2] = part[0][0];
          operands[3] = part[0][1];
          operands[4] = part[0][2];
          operands[5] = part[1][0];
          operands[6] = part[1][1];
          operands[7] = part[1][2];
          operands[2] = part[0][0];
          operands[3] = part[0][1];
          operands[5] = part[1][0];
          operands[6] = part[1][1];
  /* If optimizing for size, attempt to locally unCSE nonzero constants.  */
      if (GET_CODE (operands[5]) == CONST_INT
          && operands[5] != const0_rtx
          && REG_P (operands[2]))
          if (GET_CODE (operands[6]) == CONST_INT
              && INTVAL (operands[6]) == INTVAL (operands[5]))
            operands[6] = operands[2];
              && GET_CODE (operands[7]) == CONST_INT
              && INTVAL (operands[7]) == INTVAL (operands[5]))
            operands[7] = operands[2];
          && GET_CODE (operands[6]) == CONST_INT
          && operands[6] != const0_rtx
          && REG_P (operands[3])
          && GET_CODE (operands[7]) == CONST_INT
          && INTVAL (operands[7]) == INTVAL (operands[6]))
        operands[7] = operands[3];
  emit_move_insn (operands[2], operands[5]);
  emit_move_insn (operands[3], operands[6]);
    emit_move_insn (operands[4], operands[7]);
/* Helper function of ix86_split_ashldi used to generate an SImode
   left shift by a constant, either using a single shift or
   a sequence of add instructions.  */
ix86_expand_ashlsi3_const (rtx operand, int count)
    emit_insn (gen_addsi3 (operand, operand, operand));
  else if (!optimize_size
           && count * ix86_cost->add <= ix86_cost->shift_const)
      for (i = 0; i < count; i++)
        emit_insn (gen_addsi3 (operand, operand, operand));
    emit_insn (gen_ashlsi3 (operand, operand, GEN_INT (count)));
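/* Illustrative note: "x << 1" is emitted as "addl %eax, %eax" above, and
   on a hypothetical tuning where an add costs 1 and a constant shift
   costs 3, "x << 2" would become two adds rather than "shll $2, %eax"
   because 2 * 1 <= 3.  */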
ix86_split_ashldi (rtx *operands, rtx scratch)
  rtx low[2], high[2];
  if (GET_CODE (operands[2]) == CONST_INT)
      split_di (operands, 2, low, high);
      count = INTVAL (operands[2]) & 63;
          emit_move_insn (high[0], low[1]);
          emit_move_insn (low[0], const0_rtx);
            ix86_expand_ashlsi3_const (high[0], count - 32);
          if (!rtx_equal_p (operands[0], operands[1]))
            emit_move_insn (operands[0], operands[1]);
          emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count)));
          ix86_expand_ashlsi3_const (low[0], count);
      split_di (operands, 1, low, high);
      if (operands[1] == const1_rtx)
          /* Assuming we've chosen QImode-capable registers, then 1LL << N
             can be done with two 32-bit shifts, no branches, no cmoves.  */
          if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
              rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
              ix86_expand_clear (low[0]);
              ix86_expand_clear (high[0]);
              emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32)));
              d = gen_lowpart (QImode, low[0]);
              d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
              s = gen_rtx_EQ (QImode, flags, const0_rtx);
              emit_insn (gen_rtx_SET (VOIDmode, d, s));
              d = gen_lowpart (QImode, high[0]);
              d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
              s = gen_rtx_NE (QImode, flags, const0_rtx);
              emit_insn (gen_rtx_SET (VOIDmode, d, s));
          /* Otherwise, we can get the same results by manually performing
             a bit extract operation on bit 5, and then performing the two
             shifts.  The two methods of getting 0/1 into low/high are
             exactly the same size.  Avoiding the shift in the bit extract
             case helps pentium4 a bit; no one else seems to care much
             either way.  */
              if (TARGET_PARTIAL_REG_STALL && !optimize_size)
                x = gen_rtx_ZERO_EXTEND (SImode, operands[2]);
                x = gen_lowpart (SImode, operands[2]);
              emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
              emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (5)));
              emit_insn (gen_andsi3 (high[0], high[0], GEN_INT (1)));
              emit_move_insn (low[0], high[0]);
              emit_insn (gen_xorsi3 (low[0], low[0], GEN_INT (1)));
          emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
          emit_insn (gen_ashlsi3 (high[0], high[0], operands[2]));
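          /* Worked example (illustrative): for 1LL << 40 the bit-5 test
             sets low = 0, high = 1, and the two variable shifts then
             shift by 40 & 31 == 8, leaving high:low == (1 << 8):0, i.e.
             bit 40 of the 64-bit result.  The trick relies on 32-bit
             shifts masking their count to 5 bits.  */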
      if (operands[1] == constm1_rtx)
          /* For -1LL << N, we can avoid the shld instruction, because we
             know that we're shifting 0...31 ones into a -1.  */
          emit_move_insn (low[0], constm1_rtx);
            emit_move_insn (high[0], low[0]);
            emit_move_insn (high[0], constm1_rtx);
          if (!rtx_equal_p (operands[0], operands[1]))
            emit_move_insn (operands[0], operands[1]);
          split_di (operands, 1, low, high);
          emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2]));
      emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
      if (TARGET_CMOVE && scratch)
          ix86_expand_clear (scratch);
          emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
        emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
ix86_split_ashrdi (rtx *operands, rtx scratch)
  rtx low[2], high[2];
  if (GET_CODE (operands[2]) == CONST_INT)
      split_di (operands, 2, low, high);
      count = INTVAL (operands[2]) & 63;
          emit_move_insn (high[0], high[1]);
          emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
          emit_move_insn (low[0], high[0]);
      else if (count >= 32)
          emit_move_insn (low[0], high[1]);
          emit_move_insn (high[0], low[0]);
          emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
            emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32)));
          if (!rtx_equal_p (operands[0], operands[1]))
            emit_move_insn (operands[0], operands[1]);
          emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
          emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count)));
      if (!rtx_equal_p (operands[0], operands[1]))
        emit_move_insn (operands[0], operands[1]);
      split_di (operands, 1, low, high);
      emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
      emit_insn (gen_ashrsi3 (high[0], high[0], operands[2]));
      if (TARGET_CMOVE && scratch)
          emit_move_insn (scratch, high[0]);
          emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31)));
          emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
        emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
ix86_split_lshrdi (rtx *operands, rtx scratch)
  rtx low[2], high[2];
  if (GET_CODE (operands[2]) == CONST_INT)
      split_di (operands, 2, low, high);
      count = INTVAL (operands[2]) & 63;
          emit_move_insn (low[0], high[1]);
          ix86_expand_clear (high[0]);
            emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32)));
          if (!rtx_equal_p (operands[0], operands[1]))
            emit_move_insn (operands[0], operands[1]);
          emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
          emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count)));
      if (!rtx_equal_p (operands[0], operands[1]))
        emit_move_insn (operands[0], operands[1]);
      split_di (operands, 1, low, high);
      emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
      emit_insn (gen_lshrsi3 (high[0], high[0], operands[2]));
      /* Heh.  By reversing the arguments, we can reuse this pattern.  */
      if (TARGET_CMOVE && scratch)
          ix86_expand_clear (scratch);
          emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
        emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
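/* Illustrative note on the variable-count DImode shifts above: the core
   sequence for a logical right shift is
       shrdl %cl, high, low
       shrl  %cl, high
   which is correct for counts 0..31; the shift_adj patterns then fix up
   counts 32..63 (where the words must be swapped), either with cmov when
   available or with a conditional jump.  */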
/* Helper function for the string operations below.  Test VARIABLE for
   whether it is aligned to VALUE bytes.  If so, jump to the label.  */
ix86_expand_aligntest (rtx variable, int value)
  rtx label = gen_label_rtx ();
  rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
  if (GET_MODE (variable) == DImode)
    emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
    emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
  emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
/* Adjust COUNTREG by VALUE.  */
ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
  if (GET_MODE (countreg) == DImode)
    emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
    emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
/* Zero-extend the possibly-SImode EXP to a Pmode register.  */
ix86_zero_extend_to_Pmode (rtx exp)
  if (GET_MODE (exp) == VOIDmode)
    return force_reg (Pmode, exp);
  if (GET_MODE (exp) == Pmode)
    return copy_to_mode_reg (Pmode, exp);
  r = gen_reg_rtx (Pmode);
  emit_insn (gen_zero_extendsidi2 (r, exp));
/* Expand a string move (memcpy) operation.  Use i386 string operations
   when profitable.  expand_clrmem contains similar code.  */
ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
  rtx srcreg, destreg, countreg, srcexp, destexp;
  enum machine_mode counter_mode;
  HOST_WIDE_INT align = 0;
  unsigned HOST_WIDE_INT count = 0;
  if (GET_CODE (align_exp) == CONST_INT)
    align = INTVAL (align_exp);
  /* Can't use any of this if the user has appropriated esi or edi.  */
  if (global_regs[4] || global_regs[5])
  /* This simple hack avoids all inlining code and simplifies code below.  */
  if (!TARGET_ALIGN_STRINGOPS)
  if (GET_CODE (count_exp) == CONST_INT)
      count = INTVAL (count_exp);
      if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
  /* Figure out the proper mode for the counter.  For 32 bits it is
     always SImode; for 64 bits use SImode when possible, otherwise
     DImode.  Set count to the number of bytes copied when known at
     compile time.  */
      || GET_MODE (count_exp) == SImode
      || x86_64_zext_immediate_operand (count_exp, VOIDmode))
    counter_mode = SImode;
    counter_mode = DImode;
  if (counter_mode != SImode && counter_mode != DImode)
  destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
  if (destreg != XEXP (dst, 0))
    dst = replace_equiv_address_nv (dst, destreg);
  srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
  if (srcreg != XEXP (src, 0))
    src = replace_equiv_address_nv (src, srcreg);
  /* When optimizing for size, emit the simple rep; movsb instruction for
     counts not divisible by 4.  */
  if ((!optimize || optimize_size) && (count == 0 || (count & 0x03)))
      emit_insn (gen_cld ());
      countreg = ix86_zero_extend_to_Pmode (count_exp);
      destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
      srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
      emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
  /* For constant aligned (or small unaligned) copies use rep movsl
     followed by code copying the rest.  For PentiumPro ensure 8-byte
     alignment to allow rep movsl acceleration.  */
  else if (count != 0
           || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
           || optimize_size || count < (unsigned int) 64))
      unsigned HOST_WIDE_INT offset = 0;
      int size = TARGET_64BIT && !optimize_size ? 8 : 4;
      rtx srcmem, dstmem;
      emit_insn (gen_cld ());
      if (count & ~(size - 1))
          countreg = copy_to_mode_reg (counter_mode,
                                       GEN_INT ((count >> (size == 4 ? 2 : 3))
                                                & (TARGET_64BIT ? -1 : 0x3fffffff)));
          countreg = ix86_zero_extend_to_Pmode (countreg);
          destexp = gen_rtx_ASHIFT (Pmode, countreg,
                                    GEN_INT (size == 4 ? 2 : 3));
          srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
          destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
          emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
                                  countreg, destexp, srcexp));
          offset = count & ~(size - 1);
      if (size == 8 && (count & 0x04))
          srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
          dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
          srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
          dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
          srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
          dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
  /* The generic code, based on the glibc implementation:
     - align the destination to 4 bytes (8-byte alignment is used for
       PentiumPro, allowing accelerated copying there)
     - copy the data using rep movsl
     - copy the rest
     (see the illustrative outline just below).  */
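  /* Illustrative outline (a sketch, not literal output) of what this
     path emits for an unknown alignment:
         testb $1, %edi ; jz 1f ; movsb ; decl %ecx      ; 1:
         testb $2, %edi ; jz 2f ; movsw ; subl $2, %ecx  ; 2:
         movl %ecx, %edx ; shrl $2, %ecx ; rep movsl
         testb $2, %dl ; jz 3f ; movsw                   ; 3:
         testb $1, %dl ; jz 4f ; movsb                   ; 4:
     i.e. byte/word fixups to align, the rep movsl bulk copy, then tail
     fixups for the remaining 0..3 bytes.  */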
      rtx srcmem, dstmem;
      int desired_alignment = (TARGET_PENTIUMPRO
                               && (count == 0 || count >= (unsigned int) 260)
                               ? 8 : UNITS_PER_WORD);
      /* Get rid of MEM_OFFSETs, they won't be accurate.  */
      dst = change_address (dst, BLKmode, destreg);
      src = change_address (src, BLKmode, srcreg);
      /* In case we don't know anything about the alignment, default to
         the library version, since it is usually equally fast and
         results in shorter code.
         Also emit a call when we know that the count is large and call
         overhead will not be important.  */
      if (!TARGET_INLINE_ALL_STRINGOPS
          && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
      if (TARGET_SINGLE_STRINGOP)
        emit_insn (gen_cld ());
      countreg2 = gen_reg_rtx (Pmode);
      countreg = copy_to_mode_reg (counter_mode, count_exp);
      /* We don't use loops to align destination and to copy parts smaller
         than 4 bytes, because gcc is able to optimize such code better (in
         the case the destination or the count really is aligned, gcc is
         often able to predict the branches) and also it is friendlier to
         the hardware branch prediction.

         Using loops is beneficial for the generic case, because we can
         handle small counts using the loops.  Many CPUs (such as Athlon)
         have large REP prefix setup costs.

         This is quite costly.  Maybe we can revisit this decision later or
         add some customizability to this code.  */
      if (count == 0 && align < desired_alignment)
          label = gen_label_rtx ();
          emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
                                   LEU, 0, counter_mode, 1, label);
          rtx label = ix86_expand_aligntest (destreg, 1);
          srcmem = change_address (src, QImode, srcreg);
          dstmem = change_address (dst, QImode, destreg);
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
          ix86_adjust_counter (countreg, 1);
          emit_label (label);
          LABEL_NUSES (label) = 1;
          rtx label = ix86_expand_aligntest (destreg, 2);
          srcmem = change_address (src, HImode, srcreg);
          dstmem = change_address (dst, HImode, destreg);
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
          ix86_adjust_counter (countreg, 2);
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (align <= 4 && desired_alignment > 4)
          rtx label = ix86_expand_aligntest (destreg, 4);
          srcmem = change_address (src, SImode, srcreg);
          dstmem = change_address (dst, SImode, destreg);
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
          ix86_adjust_counter (countreg, 4);
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (label && desired_alignment > 4 && !TARGET_64BIT)
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (!TARGET_SINGLE_STRINGOP)
        emit_insn (gen_cld ());
          emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
          destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
          emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
          destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
      srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
      destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
      emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
                              countreg2, destexp, srcexp));
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
          srcmem = change_address (src, SImode, srcreg);
          dstmem = change_address (dst, SImode, destreg);
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
      if ((align <= 4 || count == 0) && TARGET_64BIT)
          rtx label = ix86_expand_aligntest (countreg, 4);
          srcmem = change_address (src, SImode, srcreg);
          dstmem = change_address (dst, SImode, destreg);
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (align > 2 && count != 0 && (count & 2))
          srcmem = change_address (src, HImode, srcreg);
          dstmem = change_address (dst, HImode, destreg);
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
      if (align <= 2 || count == 0)
          rtx label = ix86_expand_aligntest (countreg, 2);
          srcmem = change_address (src, HImode, srcreg);
          dstmem = change_address (dst, HImode, destreg);
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (align > 1 && count != 0 && (count & 1))
          srcmem = change_address (src, QImode, srcreg);
          dstmem = change_address (dst, QImode, destreg);
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
      if (align <= 1 || count == 0)
          rtx label = ix86_expand_aligntest (countreg, 1);
          srcmem = change_address (src, QImode, srcreg);
          dstmem = change_address (dst, QImode, destreg);
          emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
          emit_label (label);
          LABEL_NUSES (label) = 1;
/* Expand a string clear operation (bzero).  Use i386 string operations
   when profitable.  expand_movmem contains similar code.  */
ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
  rtx destreg, zeroreg, countreg, destexp;
  enum machine_mode counter_mode;
  HOST_WIDE_INT align = 0;
  unsigned HOST_WIDE_INT count = 0;
  if (GET_CODE (align_exp) == CONST_INT)
    align = INTVAL (align_exp);
  /* Can't use any of this if the user has appropriated esi.  */
  if (global_regs[4])
  /* This simple hack avoids all inlining code and simplifies code below.  */
  if (!TARGET_ALIGN_STRINGOPS)
  if (GET_CODE (count_exp) == CONST_INT)
      count = INTVAL (count_exp);
      if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
  /* Figure out the proper mode for the counter.  For 32 bits it is
     always SImode; for 64 bits use SImode when possible, otherwise
     DImode.  Set count to the number of bytes to be cleared when known
     at compile time.  */
      || GET_MODE (count_exp) == SImode
      || x86_64_zext_immediate_operand (count_exp, VOIDmode))
    counter_mode = SImode;
    counter_mode = DImode;
  destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
  if (destreg != XEXP (dst, 0))
    dst = replace_equiv_address_nv (dst, destreg);
  /* When optimizing for size, emit the simple rep; stosb instruction for
     counts not divisible by 4.  The movl $N, %ecx; rep; stosb
     sequence is 7 bytes long, so if optimizing for size and count is
     small enough that some stosl, stosw and stosb instructions without
     rep are shorter, fall back into the next if.  */
  if ((!optimize || optimize_size)
          && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
      emit_insn (gen_cld ());
      countreg = ix86_zero_extend_to_Pmode (count_exp);
      zeroreg = copy_to_mode_reg (QImode, const0_rtx);
      destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
      emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
  else if (count != 0
           || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
           || optimize_size || count < (unsigned int) 64))
      int size = TARGET_64BIT && !optimize_size ? 8 : 4;
      unsigned HOST_WIDE_INT offset = 0;
      emit_insn (gen_cld ());
      zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
      if (count & ~(size - 1))
          unsigned HOST_WIDE_INT repcount;
          unsigned int max_nonrep;
          repcount = count >> (size == 4 ? 2 : 3);
            repcount &= 0x3fffffff;
          /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
             movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
             bytes.  In both cases the latter seems to be faster for small
             values of N.  */
          max_nonrep = size == 4 ? 7 : 4;
          if (!optimize_size)
              case PROCESSOR_PENTIUM4:
              case PROCESSOR_NOCONA:
          if (repcount <= max_nonrep)
            while (repcount-- > 0)
                rtx mem = adjust_automodify_address_nv (dst,
                                                        GET_MODE (zeroreg),
                emit_insn (gen_strset (destreg, mem, zeroreg));
              countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
              countreg = ix86_zero_extend_to_Pmode (countreg);
              destexp = gen_rtx_ASHIFT (Pmode, countreg,
                                        GEN_INT (size == 4 ? 2 : 3));
              destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
              emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
          offset = count & ~(size - 1);
      if (size == 8 && (count & 0x04))
          rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
          emit_insn (gen_strset (destreg, mem,
                                 gen_rtx_SUBREG (SImode, zeroreg, 0)));
          rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
          emit_insn (gen_strset (destreg, mem,
                                 gen_rtx_SUBREG (HImode, zeroreg, 0)));
          rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
          emit_insn (gen_strset (destreg, mem,
                                 gen_rtx_SUBREG (QImode, zeroreg, 0)));
      /* Compute the desired alignment of the string operation.  */
      int desired_alignment = (TARGET_PENTIUMPRO
                               && (count == 0 || count >= (unsigned int) 260)
                               ? 8 : UNITS_PER_WORD);
      /* In case we don't know anything about the alignment, default to
         the library version, since it is usually equally fast and
         results in shorter code.
         Also emit a call when we know that the count is large and call
         overhead will not be important.  */
      if (!TARGET_INLINE_ALL_STRINGOPS
          && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
      if (TARGET_SINGLE_STRINGOP)
        emit_insn (gen_cld ());
      countreg2 = gen_reg_rtx (Pmode);
      countreg = copy_to_mode_reg (counter_mode, count_exp);
      zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
      /* Get rid of MEM_OFFSET, it won't be accurate.  */
      dst = change_address (dst, BLKmode, destreg);
      if (count == 0 && align < desired_alignment)
          label = gen_label_rtx ();
          emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
                                   LEU, 0, counter_mode, 1, label);
          rtx label = ix86_expand_aligntest (destreg, 1);
          emit_insn (gen_strset (destreg, dst,
                                 gen_rtx_SUBREG (QImode, zeroreg, 0)));
          ix86_adjust_counter (countreg, 1);
          emit_label (label);
          LABEL_NUSES (label) = 1;
          rtx label = ix86_expand_aligntest (destreg, 2);
          emit_insn (gen_strset (destreg, dst,
                                 gen_rtx_SUBREG (HImode, zeroreg, 0)));
          ix86_adjust_counter (countreg, 2);
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (align <= 4 && desired_alignment > 4)
          rtx label = ix86_expand_aligntest (destreg, 4);
          emit_insn (gen_strset (destreg, dst,
                                 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
          ix86_adjust_counter (countreg, 4);
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (label && desired_alignment > 4 && !TARGET_64BIT)
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (!TARGET_SINGLE_STRINGOP)
        emit_insn (gen_cld ());
          emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
          destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
          emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
          destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
      destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
      emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
        emit_insn (gen_strset (destreg, dst,
                               gen_rtx_SUBREG (SImode, zeroreg, 0)));
      if (TARGET_64BIT && (align <= 4 || count == 0))
          rtx label = ix86_expand_aligntest (countreg, 4);
          emit_insn (gen_strset (destreg, dst,
                                 gen_rtx_SUBREG (SImode, zeroreg, 0)));
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (align > 2 && count != 0 && (count & 2))
        emit_insn (gen_strset (destreg, dst,
                               gen_rtx_SUBREG (HImode, zeroreg, 0)));
      if (align <= 2 || count == 0)
          rtx label = ix86_expand_aligntest (countreg, 2);
          emit_insn (gen_strset (destreg, dst,
                                 gen_rtx_SUBREG (HImode, zeroreg, 0)));
          emit_label (label);
          LABEL_NUSES (label) = 1;
      if (align > 1 && count != 0 && (count & 1))
        emit_insn (gen_strset (destreg, dst,
                               gen_rtx_SUBREG (QImode, zeroreg, 0)));
      if (align <= 1 || count == 0)
          rtx label = ix86_expand_aligntest (countreg, 1);
          emit_insn (gen_strset (destreg, dst,
                                 gen_rtx_SUBREG (QImode, zeroreg, 0)));
          emit_label (label);
          LABEL_NUSES (label) = 1;
/* Expand strlen.  */
ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
  rtx addr, scratch1, scratch2, scratch3, scratch4;
  /* The generic case of the strlen expander is long.  Avoid expanding it
     unless TARGET_INLINE_ALL_STRINGOPS.  */
  if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
      && !TARGET_INLINE_ALL_STRINGOPS
      && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
  addr = force_reg (Pmode, XEXP (src, 0));
  scratch1 = gen_reg_rtx (Pmode);
  if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
      /* Well, it seems that some optimizer does not combine a call like
         foo (strlen (bar), strlen (bar));
         when the move and the subtraction are done here.  It does
         calculate the length just once when these instructions are done
         inside of output_strlen_unroll().  But I think that since
         &bar[strlen (bar)] is often used and I use one fewer register for
         the lifetime of output_strlen_unroll() this is better.  */
      emit_move_insn (out, addr);
      ix86_expand_strlensi_unroll_1 (out, src, align);
      /* strlensi_unroll_1 returns the address of the zero at the end of
         the string, like memchr(), so compute the length by subtracting
         the start address.  */
        emit_insn (gen_subdi3 (out, out, addr));
        emit_insn (gen_subsi3 (out, out, addr));
      scratch2 = gen_reg_rtx (Pmode);
      scratch3 = gen_reg_rtx (Pmode);
      scratch4 = force_reg (Pmode, constm1_rtx);
      emit_move_insn (scratch3, addr);
      eoschar = force_reg (QImode, eoschar);
      emit_insn (gen_cld ());
      src = replace_equiv_address_nv (src, scratch3);
      /* If .md starts supporting :P, this can be done in .md.  */
      unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
                                                 scratch4), UNSPEC_SCAS);
      emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
          emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
          emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
          emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
          emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
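      /* Arithmetic behind the repnz scasb path above (illustrative): the
         count register starts at -1 and is decremented once per byte
         scanned, including the terminating NUL, so after the scan it
         holds -(n + 2) for a string of length n.  Hence the NOT gives
         n + 1, and the final add of -1 leaves exactly n in OUT.  */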
/* Expand the appropriate insns for doing strlen if not just doing
   repnz; scasb

   out = result, initialized with the start address
   align_rtx = alignment of the address.
   scratch = scratch register, initialized with the start address when
	not aligned, otherwise undefined

   This is just the body.  It needs the initializations mentioned above
   and some address computing at the end.  These things are done in
   i386.md.  */
ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
  rtx align_2_label = NULL_RTX;
  rtx align_3_label = NULL_RTX;
  rtx align_4_label = gen_label_rtx ();
  rtx end_0_label = gen_label_rtx ();
  rtx tmpreg = gen_reg_rtx (SImode);
  rtx scratch = gen_reg_rtx (SImode);
  if (GET_CODE (align_rtx) == CONST_INT)
    align = INTVAL (align_rtx);
  /* Loop to check 1..3 bytes for null to get an aligned pointer.  */
  /* Is there a known alignment and is it less than 4?  */
      rtx scratch1 = gen_reg_rtx (Pmode);
      emit_move_insn (scratch1, out);
      /* Is there a known alignment and is it not 2?  */
          align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte  */
          align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte  */
          /* Leave just the two lower bits.  */
          align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
                                    NULL_RTX, 0, OPTAB_WIDEN);
          emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
                                   Pmode, 1, align_4_label);
          emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
                                   Pmode, 1, align_2_label);
          emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
                                   Pmode, 1, align_3_label);
          /* Since the alignment is 2, we have to check 2 or 0 bytes;
             check whether it is aligned to a 4-byte boundary.  */
          align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
                                    NULL_RTX, 0, OPTAB_WIDEN);
          emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
                                   Pmode, 1, align_4_label);
  mem = change_address (src, QImode, out);
11320 /* Now compare the bytes. */
11322 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
11323 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
11324 QImode, 1, end_0_label);
11326 /* Increment the address. */
11328 emit_insn (gen_adddi3 (out, out, const1_rtx));
11330 emit_insn (gen_addsi3 (out, out, const1_rtx));
11332 /* Not needed with an alignment of 2 */
11335 emit_label (align_2_label);
11337 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
11341 emit_insn (gen_adddi3 (out, out, const1_rtx));
11343 emit_insn (gen_addsi3 (out, out, const1_rtx));
11345 emit_label (align_3_label);
11348 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
11352 emit_insn (gen_adddi3 (out, out, const1_rtx));
11354 emit_insn (gen_addsi3 (out, out, const1_rtx));
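/* C-level picture of the prologue built above (illustrative sketch;
   `p' stands for the scanned pointer held in OUT):

       while (((uintptr_t) p & 3) != 0)   // at most three iterations
         if (*p++ == '\0')
           goto done;

   the align_2/align_3 labels are entry points into this unrolled
   check, selected by the run-time low-bits test further up.  */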
11357 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
11358 align this loop: it only produces huge programs and does not speed things up. */
11360 emit_label (align_4_label);
11362 mem = change_address (src, SImode, out);
11363 emit_move_insn (scratch, mem);
11365 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
11367 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
11369 /* This formula yields a nonzero result iff one of the bytes is zero.
11370 This saves three branches inside the loop and many cycles. */
11372 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
11373 emit_insn (gen_one_cmplsi2 (scratch, scratch));
11374 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
11375 emit_insn (gen_andsi3 (tmpreg, tmpreg,
11376 gen_int_mode (0x80808080, SImode)));
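/* The four insns above implement the classic zero-byte test: for a
   32-bit word w, (w - 0x01010101) & ~w & 0x80808080 is nonzero iff
   some byte of w is zero.  A hypothetical stand-alone C version of
   the same computation, for illustration only:

       static int
       word_has_zero_byte (unsigned int w)
       {
         return ((w - 0x01010101U) & ~w & 0x80808080U) != 0;
       }
*/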
11377 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
11382 rtx reg = gen_reg_rtx (SImode);
11383 rtx reg2 = gen_reg_rtx (Pmode);
11384 emit_move_insn (reg, tmpreg);
11385 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
11387 /* If zero is not in the first two bytes, move two bytes forward. */
11388 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11389 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11390 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11391 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
11392 gen_rtx_IF_THEN_ELSE (SImode, tmp,
11395 /* Emit the lea manually to avoid clobbering the flags. */
11396 emit_insn (gen_rtx_SET (SImode, reg2,
11397 gen_rtx_PLUS (Pmode, out, const2_rtx)));
11399 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11400 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11401 emit_insn (gen_rtx_SET (VOIDmode, out,
11402 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
11409 rtx end_2_label = gen_label_rtx ();
11410 /* Is zero in the first two bytes? */
11412 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11413 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11414 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
11415 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
11416 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
11418 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
11419 JUMP_LABEL (tmp) = end_2_label;
11421 /* Not in the first two. Move two bytes forward. */
11422 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
11424 emit_insn (gen_adddi3 (out, out, const2_rtx));
11426 emit_insn (gen_addsi3 (out, out, const2_rtx));
11428 emit_label (end_2_label);
11432 /* Avoid a branch when fixing up the final byte. */
11433 tmpreg = gen_lowpart (QImode, tmpreg);
11434 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
11435 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
11437 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
11439 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
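/* How the branchless fixup above works (explanatory note): at this
   point bit 7 of the low byte of TMPREG is set iff the zero byte is
   the first of the remaining pair.  Doubling that byte with
   addqi3_cc moves bit 7 into the carry flag, and the following
   subtract-with-borrow then takes either 3 (carry clear: zero in the
   second byte) or 4 (carry set: zero in the first byte) off OUT
   without any branch.  */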
11441 emit_label (end_0_label);
11445 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
11446 rtx callarg2 ATTRIBUTE_UNUSED,
11447 rtx pop, int sibcall)
11449 rtx use = NULL, call;
11451 if (pop == const0_rtx)
11453 if (TARGET_64BIT && pop)
11457 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
11458 fnaddr = machopic_indirect_call_target (fnaddr);
11460 /* Static functions and indirect calls don't need the pic register. */
11461 if (! TARGET_64BIT && flag_pic
11462 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
11463 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
11464 use_reg (&use, pic_offset_table_rtx);
11466 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
11468 rtx al = gen_rtx_REG (QImode, 0);
11469 emit_move_insn (al, callarg2);
11470 use_reg (&use, al);
11472 #endif /* TARGET_MACHO */
11474 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
11476 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11477 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11479 if (sibcall && TARGET_64BIT
11480 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
11483 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11484 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
11485 emit_move_insn (fnaddr, addr);
11486 fnaddr = gen_rtx_MEM (QImode, fnaddr);
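/* Note on the register choice above: in the x86-64 ABI R11 is a
   call-clobbered register used neither for argument passing nor for
   the static chain, so loading the sibcall target into it cannot
   disturb anything the callee expects.  */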
11489 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
11491 call = gen_rtx_SET (VOIDmode, retval, call);
11494 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
11495 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
11496 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
11499 call = emit_call_insn (call);
11501 CALL_INSN_FUNCTION_USAGE (call) = use;
11505 /* Clear stack slot assignments remembered from previous functions.
11506 This is called from INIT_EXPANDERS once before RTL is emitted for each function. */
11509 static struct machine_function *
11510 ix86_init_machine_status (void)
11512 struct machine_function *f;
11514 f = ggc_alloc_cleared (sizeof (struct machine_function));
11515 f->use_fast_prologue_epilogue_nregs = -1;
11520 /* Return a MEM corresponding to a stack slot with mode MODE.
11521 Allocate a new slot if necessary.
11523 The RTL for a function can have several slots available: N is
11524 which slot to use. */
11527 assign_386_stack_local (enum machine_mode mode, int n)
11529 struct stack_local_entry *s;
11531 if (n < 0 || n >= MAX_386_STACK_LOCALS)
11534 for (s = ix86_stack_locals; s; s = s->next)
11535 if (s->mode == mode && s->n == n)
11538 s = (struct stack_local_entry *)
11539 ggc_alloc (sizeof (struct stack_local_entry));
11542 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
11544 s->next = ix86_stack_locals;
11545 ix86_stack_locals = s;
11549 /* Construct the SYMBOL_REF for the tls_get_addr function. */
11551 static GTY(()) rtx ix86_tls_symbol;
11553 ix86_tls_get_addr (void)
11556 if (!ix86_tls_symbol)
11558 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
11559 (TARGET_GNU_TLS && !TARGET_64BIT)
11560 ? "___tls_get_addr"
11561 : "__tls_get_addr");
11564 return ix86_tls_symbol;
11567 /* Calculate the length of the memory address in the instruction
11568 encoding. Does not include the one-byte modrm, opcode, or prefix. */
11571 memory_address_length (rtx addr)
11573 struct ix86_address parts;
11574 rtx base, index, disp;
11577 if (GET_CODE (addr) == PRE_DEC
11578 || GET_CODE (addr) == POST_INC
11579 || GET_CODE (addr) == PRE_MODIFY
11580 || GET_CODE (addr) == POST_MODIFY)
11583 if (! ix86_decompose_address (addr, &parts))
11587 index = parts.index;
11592 - esp as the base always wants an index,
11593 - ebp as the base always wants a displacement. */
11595 /* Register Indirect. */
11596 if (base && !index && !disp)
11598 /* esp (for its index) and ebp (for its displacement) need
11599 the two-byte modrm form. */
11600 if (addr == stack_pointer_rtx
11601 || addr == arg_pointer_rtx
11602 || addr == frame_pointer_rtx
11603 || addr == hard_frame_pointer_rtx)
11607 /* Direct Addressing. */
11608 else if (disp && !base && !index)
11613 /* Find the length of the displacement constant. */
11616 if (GET_CODE (disp) == CONST_INT
11617 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
11623 /* ebp always wants a displacement. */
11624 else if (base == hard_frame_pointer_rtx)
11627 /* An index requires the two-byte modrm form.... */
11629 /* ...like esp, which always wants an index. */
11630 || base == stack_pointer_rtx
11631 || base == arg_pointer_rtx
11632 || base == frame_pointer_rtx)
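/* Worked examples of the rules above (illustrative; AT&T syntax):

       movl (%eax), %ecx    -> 0: plain one-byte modrm
       movl (%esp), %ecx    -> 1: esp forces a SIB byte
       movl 8(%ebp), %ecx   -> 1: one-byte displacement
       movl foo, %ecx       -> 4: 32-bit displacement only
*/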
11639 /* Compute the default value for the "length_immediate" attribute. When SHORTFORM
11640 is set, expect that the insn has an 8-bit immediate alternative. */
11642 ix86_attr_length_immediate_default (rtx insn, int shortform)
11646 extract_insn_cached (insn);
11647 for (i = recog_data.n_operands - 1; i >= 0; --i)
11648 if (CONSTANT_P (recog_data.operand[i]))
11653 && GET_CODE (recog_data.operand[i]) == CONST_INT
11654 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
11658 switch (get_attr_mode (insn))
11669 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
11674 fatal_insn ("unknown insn mode", insn);
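/* Examples of the defaults computed above (illustrative; AT&T syntax):

       addl $100, %eax    -> short form applies (-128..127): 1-byte immediate
       addl $1000, %eax   -> SImode: 4-byte immediate
       addq $1000, %rax   -> DImode, sign-extended: 4-byte immediate
*/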
11680 /* Compute default value for "length_address" attribute. */
11682 ix86_attr_length_address_default (rtx insn)
11686 if (get_attr_type (insn) == TYPE_LEA)
11688 rtx set = PATTERN (insn);
11689 if (GET_CODE (set) == SET)
11691 else if (GET_CODE (set) == PARALLEL
11692 && GET_CODE (XVECEXP (set, 0, 0)) == SET)
11693 set = XVECEXP (set, 0, 0);
11696 #ifdef ENABLE_CHECKING
11702 return memory_address_length (SET_SRC (set));
11705 extract_insn_cached (insn);
11706 for (i = recog_data.n_operands - 1; i >= 0; --i)
11707 if (GET_CODE (recog_data.operand[i]) == MEM)
11709 return memory_address_length (XEXP (recog_data.operand[i], 0));
11715 /* Return the maximum number of instructions a cpu can issue. */
11718 ix86_issue_rate (void)
11722 case PROCESSOR_PENTIUM:
11726 case PROCESSOR_PENTIUMPRO:
11727 case PROCESSOR_PENTIUM4:
11728 case PROCESSOR_ATHLON:
11730 case PROCESSOR_NOCONA:
11738 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
11739 by DEP_INSN and no other register set by DEP_INSN. */
11742 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11746 /* Simplify the test for uninteresting insns. */
11747 if (insn_type != TYPE_SETCC
11748 && insn_type != TYPE_ICMOV
11749 && insn_type != TYPE_FCMOV
11750 && insn_type != TYPE_IBR)
11753 if ((set = single_set (dep_insn)) != 0)
11755 set = SET_DEST (set);
11758 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
11759 && XVECLEN (PATTERN (dep_insn), 0) == 2
11760 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
11761 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
11763 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
11764 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
11769 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
11772 /* This test is true if the dependent insn reads the flags but
11773 not any other potentially set register. */
11774 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
11777 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
11783 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
11784 address with operands set by DEP_INSN. */
11787 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11791 if (insn_type == TYPE_LEA
11794 addr = PATTERN (insn);
11795 if (GET_CODE (addr) == SET)
11797 else if (GET_CODE (addr) == PARALLEL
11798 && GET_CODE (XVECEXP (addr, 0, 0)) == SET)
11799 addr = XVECEXP (addr, 0, 0);
11802 addr = SET_SRC (addr);
11807 extract_insn_cached (insn);
11808 for (i = recog_data.n_operands - 1; i >= 0; --i)
11809 if (GET_CODE (recog_data.operand[i]) == MEM)
11811 addr = XEXP (recog_data.operand[i], 0);
11818 return modified_in_p (addr, dep_insn);
11822 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
11824 enum attr_type insn_type, dep_insn_type;
11825 enum attr_memory memory;
11827 int dep_insn_code_number;
11829 /* Anti and output dependencies have zero cost on all CPUs. */
11830 if (REG_NOTE_KIND (link) != 0)
11833 dep_insn_code_number = recog_memoized (dep_insn);
11835 /* If we can't recognize the insns, we can't really do anything. */
11836 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
11839 insn_type = get_attr_type (insn);
11840 dep_insn_type = get_attr_type (dep_insn);
11844 case PROCESSOR_PENTIUM:
11845 /* Address Generation Interlock adds a cycle of latency. */
11846 if (ix86_agi_dependant (insn, dep_insn, insn_type))
11849 /* ??? Compares pair with jump/setcc. */
11850 if (ix86_flags_dependant (insn, dep_insn, insn_type))
11853 /* Floating point stores require the value to be ready one cycle earlier. */
11854 if (insn_type == TYPE_FMOV
11855 && get_attr_memory (insn) == MEMORY_STORE
11856 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11860 case PROCESSOR_PENTIUMPRO:
11861 memory = get_attr_memory (insn);
11863 /* INT->FP conversion is expensive. */
11864 if (get_attr_fp_int_src (dep_insn))
11867 /* There is one cycle extra latency between an FP op and a store. */
11868 if (insn_type == TYPE_FMOV
11869 && (set = single_set (dep_insn)) != NULL_RTX
11870 && (set2 = single_set (insn)) != NULL_RTX
11871 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
11872 && GET_CODE (SET_DEST (set2)) == MEM)
11875 /* Show the reorder buffer's ability to hide the latency of a load by
11876 executing it in parallel with the previous instruction when the
11877 previous instruction is not needed to compute the address. */
11878 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11879 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11881 /* Claim moves to take one cycle, as the core can issue one load
11882 at a time and the next load can start a cycle later. */
11883 if (dep_insn_type == TYPE_IMOV
11884 || dep_insn_type == TYPE_FMOV)
11892 memory = get_attr_memory (insn);
11894 /* The esp dependency is resolved before the instruction is really finished. */
11896 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
11897 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
11900 /* INT->FP conversion is expensive. */
11901 if (get_attr_fp_int_src (dep_insn))
11904 /* Show the reorder buffer's ability to hide the latency of a load by
11905 executing it in parallel with the previous instruction when the
11906 previous instruction is not needed to compute the address. */
11907 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11908 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11910 /* Claim moves to take one cycle, as the core can issue one load
11911 at a time and the next load can start a cycle later. */
11912 if (dep_insn_type == TYPE_IMOV
11913 || dep_insn_type == TYPE_FMOV)
11922 case PROCESSOR_ATHLON:
11924 memory = get_attr_memory (insn);
11926 /* Show the reorder buffer's ability to hide the latency of a load by
11927 executing it in parallel with the previous instruction when the
11928 previous instruction is not needed to compute the address. */
11929 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11930 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11932 enum attr_unit unit = get_attr_unit (insn);
11935 /* Because the integer and floating-point unit pipeline preparation
11936 stages differ in length, memory operands for floating point are
11937 cheaper.
11939 ??? For Athlon the difference is most probably 2. */
11940 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
11943 loadcost = TARGET_ATHLON ? 2 : 0;
11945 if (cost >= loadcost)
11958 /* How many alternative schedules to try. This should be as wide as the
11959 scheduling freedom in the DFA, but no wider. Making this value too
11960 large results in extra work for the scheduler. */
11963 ia32_multipass_dfa_lookahead (void)
11965 if (ix86_tune == PROCESSOR_PENTIUM)
11968 if (ix86_tune == PROCESSOR_PENTIUMPRO
11969 || ix86_tune == PROCESSOR_K6)
11977 /* Compute the alignment given to a constant that is being placed in memory.
11978 EXP is the constant and ALIGN is the alignment that the object would ordinarily have.
11980 The value of this function is used instead of that alignment to align the object. */
11984 ix86_constant_alignment (tree exp, int align)
11986 if (TREE_CODE (exp) == REAL_CST)
11988 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
11990 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
11993 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
11994 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
11995 return BITS_PER_WORD;
12000 /* Compute the alignment for a static variable.
12001 TYPE is the data type, and ALIGN is the alignment that
12002 the object would ordinarily have. The value of this function is used
12003 instead of that alignment to align the object. */
12006 ix86_data_alignment (tree type, int align)
12008 if (AGGREGATE_TYPE_P (type)
12009 && TYPE_SIZE (type)
12010 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12011 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
12012 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
12015 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
12016 to a 16-byte boundary. */
12019 if (AGGREGATE_TYPE_P (type)
12020 && TYPE_SIZE (type)
12021 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12022 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
12023 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12027 if (TREE_CODE (type) == ARRAY_TYPE)
12029 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12031 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12034 else if (TREE_CODE (type) == COMPLEX_TYPE)
12037 if (TYPE_MODE (type) == DCmode && align < 64)
12039 if (TYPE_MODE (type) == XCmode && align < 128)
12042 else if ((TREE_CODE (type) == RECORD_TYPE
12043 || TREE_CODE (type) == UNION_TYPE
12044 || TREE_CODE (type) == QUAL_UNION_TYPE)
12045 && TYPE_FIELDS (type))
12047 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12049 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12052 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12053 || TREE_CODE (type) == INTEGER_TYPE)
12055 if (TYPE_MODE (type) == DFmode && align < 64)
12057 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12064 /* Compute the alignment for a local variable.
12065 TYPE is the data type, and ALIGN is the alignment that
12066 the object would ordinarily have. The value of this macro is used
12067 instead of that alignment to align the object. */
12070 ix86_local_alignment (tree type, int align)
12072 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
12073 to a 16-byte boundary. */
12076 if (AGGREGATE_TYPE_P (type)
12077 && TYPE_SIZE (type)
12078 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12079 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
12080 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12083 if (TREE_CODE (type) == ARRAY_TYPE)
12085 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12087 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12090 else if (TREE_CODE (type) == COMPLEX_TYPE)
12092 if (TYPE_MODE (type) == DCmode && align < 64)
12094 if (TYPE_MODE (type) == XCmode && align < 128)
12097 else if ((TREE_CODE (type) == RECORD_TYPE
12098 || TREE_CODE (type) == UNION_TYPE
12099 || TREE_CODE (type) == QUAL_UNION_TYPE)
12100 && TYPE_FIELDS (type))
12102 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12104 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12107 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12108 || TREE_CODE (type) == INTEGER_TYPE)
12111 if (TYPE_MODE (type) == DFmode && align < 64)
12113 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12119 /* Emit RTL insns to initialize the variable parts of a trampoline.
12120 FNADDR is an RTX for the address of the function's pure code.
12121 CXT is an RTX for the static chain value for the function. */
12123 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
12127 /* Compute offset from the end of the jmp to the target function. */
12128 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
12129 plus_constant (tramp, 10),
12130 NULL_RTX, 1, OPTAB_DIRECT);
12131 emit_move_insn (gen_rtx_MEM (QImode, tramp),
12132 gen_int_mode (0xb9, QImode));
12133 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
12134 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
12135 gen_int_mode (0xe9, QImode));
12136 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
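/* Byte layout of the 10-byte IA-32 trampoline emitted above (sketch):

       offset 0:  b9 <cxt:4>     movl $CXT, %ecx     (static chain)
       offset 5:  e9 <disp:4>    jmp  FNADDR         (rel32)

   DISP is FNADDR - (TRAMP + 10), i.e. relative to the end of the jmp,
   which is what the expand_binop call above computes.  */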
12141 /* Try to load the address using the shorter movl instead of movabs.
12142 We may want to support movq for kernel mode, but the kernel does not
12143 use trampolines at the moment. */
12144 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
12146 fnaddr = copy_to_mode_reg (DImode, fnaddr);
12147 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12148 gen_int_mode (0xbb41, HImode));
12149 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
12150 gen_lowpart (SImode, fnaddr));
12155 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12156 gen_int_mode (0xbb49, HImode));
12157 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12161 /* Load static chain using movabs to r10. */
12162 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12163 gen_int_mode (0xba49, HImode));
12164 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12167 /* Jump to r11. */
12168 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12169 gen_int_mode (0xff49, HImode));
12170 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
12171 gen_int_mode (0xe3, QImode));
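/* Byte layout of the 64-bit trampoline emitted above (sketch; the
   HImode stores are little-endian, so e.g. 0xbb41 lands as "41 bb"):

       41 bb <imm32>    movl   $FNADDR, %r11d   (short form, if it zero-extends)
    or 49 bb <imm64>    movabs $FNADDR, %r11
       49 ba <imm64>    movabs $CXT, %r10       (static chain)
       49 ff e3         jmpq   *%r11
*/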
12173 if (offset > TRAMPOLINE_SIZE)
12177 #ifdef ENABLE_EXECUTE_STACK
12178 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
12179 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
12183 /* Codes for all the SSE/MMX builtins. */
12186 IX86_BUILTIN_ADDPS,
12187 IX86_BUILTIN_ADDSS,
12188 IX86_BUILTIN_DIVPS,
12189 IX86_BUILTIN_DIVSS,
12190 IX86_BUILTIN_MULPS,
12191 IX86_BUILTIN_MULSS,
12192 IX86_BUILTIN_SUBPS,
12193 IX86_BUILTIN_SUBSS,
12195 IX86_BUILTIN_CMPEQPS,
12196 IX86_BUILTIN_CMPLTPS,
12197 IX86_BUILTIN_CMPLEPS,
12198 IX86_BUILTIN_CMPGTPS,
12199 IX86_BUILTIN_CMPGEPS,
12200 IX86_BUILTIN_CMPNEQPS,
12201 IX86_BUILTIN_CMPNLTPS,
12202 IX86_BUILTIN_CMPNLEPS,
12203 IX86_BUILTIN_CMPNGTPS,
12204 IX86_BUILTIN_CMPNGEPS,
12205 IX86_BUILTIN_CMPORDPS,
12206 IX86_BUILTIN_CMPUNORDPS,
12207 IX86_BUILTIN_CMPNEPS,
12208 IX86_BUILTIN_CMPEQSS,
12209 IX86_BUILTIN_CMPLTSS,
12210 IX86_BUILTIN_CMPLESS,
12211 IX86_BUILTIN_CMPNEQSS,
12212 IX86_BUILTIN_CMPNLTSS,
12213 IX86_BUILTIN_CMPNLESS,
12214 IX86_BUILTIN_CMPNGTSS,
12215 IX86_BUILTIN_CMPNGESS,
12216 IX86_BUILTIN_CMPORDSS,
12217 IX86_BUILTIN_CMPUNORDSS,
12218 IX86_BUILTIN_CMPNESS,
12220 IX86_BUILTIN_COMIEQSS,
12221 IX86_BUILTIN_COMILTSS,
12222 IX86_BUILTIN_COMILESS,
12223 IX86_BUILTIN_COMIGTSS,
12224 IX86_BUILTIN_COMIGESS,
12225 IX86_BUILTIN_COMINEQSS,
12226 IX86_BUILTIN_UCOMIEQSS,
12227 IX86_BUILTIN_UCOMILTSS,
12228 IX86_BUILTIN_UCOMILESS,
12229 IX86_BUILTIN_UCOMIGTSS,
12230 IX86_BUILTIN_UCOMIGESS,
12231 IX86_BUILTIN_UCOMINEQSS,
12233 IX86_BUILTIN_CVTPI2PS,
12234 IX86_BUILTIN_CVTPS2PI,
12235 IX86_BUILTIN_CVTSI2SS,
12236 IX86_BUILTIN_CVTSI642SS,
12237 IX86_BUILTIN_CVTSS2SI,
12238 IX86_BUILTIN_CVTSS2SI64,
12239 IX86_BUILTIN_CVTTPS2PI,
12240 IX86_BUILTIN_CVTTSS2SI,
12241 IX86_BUILTIN_CVTTSS2SI64,
12243 IX86_BUILTIN_MAXPS,
12244 IX86_BUILTIN_MAXSS,
12245 IX86_BUILTIN_MINPS,
12246 IX86_BUILTIN_MINSS,
12248 IX86_BUILTIN_LOADUPS,
12249 IX86_BUILTIN_STOREUPS,
12250 IX86_BUILTIN_MOVSS,
12252 IX86_BUILTIN_MOVHLPS,
12253 IX86_BUILTIN_MOVLHPS,
12254 IX86_BUILTIN_LOADHPS,
12255 IX86_BUILTIN_LOADLPS,
12256 IX86_BUILTIN_STOREHPS,
12257 IX86_BUILTIN_STORELPS,
12259 IX86_BUILTIN_MASKMOVQ,
12260 IX86_BUILTIN_MOVMSKPS,
12261 IX86_BUILTIN_PMOVMSKB,
12263 IX86_BUILTIN_MOVNTPS,
12264 IX86_BUILTIN_MOVNTQ,
12266 IX86_BUILTIN_LOADDQU,
12267 IX86_BUILTIN_STOREDQU,
12269 IX86_BUILTIN_LOADD,
12270 IX86_BUILTIN_STORED,
12272 IX86_BUILTIN_PACKSSWB,
12273 IX86_BUILTIN_PACKSSDW,
12274 IX86_BUILTIN_PACKUSWB,
12276 IX86_BUILTIN_PADDB,
12277 IX86_BUILTIN_PADDW,
12278 IX86_BUILTIN_PADDD,
12279 IX86_BUILTIN_PADDQ,
12280 IX86_BUILTIN_PADDSB,
12281 IX86_BUILTIN_PADDSW,
12282 IX86_BUILTIN_PADDUSB,
12283 IX86_BUILTIN_PADDUSW,
12284 IX86_BUILTIN_PSUBB,
12285 IX86_BUILTIN_PSUBW,
12286 IX86_BUILTIN_PSUBD,
12287 IX86_BUILTIN_PSUBQ,
12288 IX86_BUILTIN_PSUBSB,
12289 IX86_BUILTIN_PSUBSW,
12290 IX86_BUILTIN_PSUBUSB,
12291 IX86_BUILTIN_PSUBUSW,
12294 IX86_BUILTIN_PANDN,
12298 IX86_BUILTIN_PAVGB,
12299 IX86_BUILTIN_PAVGW,
12301 IX86_BUILTIN_PCMPEQB,
12302 IX86_BUILTIN_PCMPEQW,
12303 IX86_BUILTIN_PCMPEQD,
12304 IX86_BUILTIN_PCMPGTB,
12305 IX86_BUILTIN_PCMPGTW,
12306 IX86_BUILTIN_PCMPGTD,
12308 IX86_BUILTIN_PMADDWD,
12310 IX86_BUILTIN_PMAXSW,
12311 IX86_BUILTIN_PMAXUB,
12312 IX86_BUILTIN_PMINSW,
12313 IX86_BUILTIN_PMINUB,
12315 IX86_BUILTIN_PMULHUW,
12316 IX86_BUILTIN_PMULHW,
12317 IX86_BUILTIN_PMULLW,
12319 IX86_BUILTIN_PSADBW,
12320 IX86_BUILTIN_PSHUFW,
12322 IX86_BUILTIN_PSLLW,
12323 IX86_BUILTIN_PSLLD,
12324 IX86_BUILTIN_PSLLQ,
12325 IX86_BUILTIN_PSRAW,
12326 IX86_BUILTIN_PSRAD,
12327 IX86_BUILTIN_PSRLW,
12328 IX86_BUILTIN_PSRLD,
12329 IX86_BUILTIN_PSRLQ,
12330 IX86_BUILTIN_PSLLWI,
12331 IX86_BUILTIN_PSLLDI,
12332 IX86_BUILTIN_PSLLQI,
12333 IX86_BUILTIN_PSRAWI,
12334 IX86_BUILTIN_PSRADI,
12335 IX86_BUILTIN_PSRLWI,
12336 IX86_BUILTIN_PSRLDI,
12337 IX86_BUILTIN_PSRLQI,
12339 IX86_BUILTIN_PUNPCKHBW,
12340 IX86_BUILTIN_PUNPCKHWD,
12341 IX86_BUILTIN_PUNPCKHDQ,
12342 IX86_BUILTIN_PUNPCKLBW,
12343 IX86_BUILTIN_PUNPCKLWD,
12344 IX86_BUILTIN_PUNPCKLDQ,
12346 IX86_BUILTIN_SHUFPS,
12348 IX86_BUILTIN_RCPPS,
12349 IX86_BUILTIN_RCPSS,
12350 IX86_BUILTIN_RSQRTPS,
12351 IX86_BUILTIN_RSQRTSS,
12352 IX86_BUILTIN_SQRTPS,
12353 IX86_BUILTIN_SQRTSS,
12355 IX86_BUILTIN_UNPCKHPS,
12356 IX86_BUILTIN_UNPCKLPS,
12358 IX86_BUILTIN_ANDPS,
12359 IX86_BUILTIN_ANDNPS,
12361 IX86_BUILTIN_XORPS,
12364 IX86_BUILTIN_LDMXCSR,
12365 IX86_BUILTIN_STMXCSR,
12366 IX86_BUILTIN_SFENCE,
12368 /* 3DNow! Original */
12369 IX86_BUILTIN_FEMMS,
12370 IX86_BUILTIN_PAVGUSB,
12371 IX86_BUILTIN_PF2ID,
12372 IX86_BUILTIN_PFACC,
12373 IX86_BUILTIN_PFADD,
12374 IX86_BUILTIN_PFCMPEQ,
12375 IX86_BUILTIN_PFCMPGE,
12376 IX86_BUILTIN_PFCMPGT,
12377 IX86_BUILTIN_PFMAX,
12378 IX86_BUILTIN_PFMIN,
12379 IX86_BUILTIN_PFMUL,
12380 IX86_BUILTIN_PFRCP,
12381 IX86_BUILTIN_PFRCPIT1,
12382 IX86_BUILTIN_PFRCPIT2,
12383 IX86_BUILTIN_PFRSQIT1,
12384 IX86_BUILTIN_PFRSQRT,
12385 IX86_BUILTIN_PFSUB,
12386 IX86_BUILTIN_PFSUBR,
12387 IX86_BUILTIN_PI2FD,
12388 IX86_BUILTIN_PMULHRW,
12390 /* 3DNow! Athlon Extensions */
12391 IX86_BUILTIN_PF2IW,
12392 IX86_BUILTIN_PFNACC,
12393 IX86_BUILTIN_PFPNACC,
12394 IX86_BUILTIN_PI2FW,
12395 IX86_BUILTIN_PSWAPDSI,
12396 IX86_BUILTIN_PSWAPDSF,
12399 IX86_BUILTIN_ADDPD,
12400 IX86_BUILTIN_ADDSD,
12401 IX86_BUILTIN_DIVPD,
12402 IX86_BUILTIN_DIVSD,
12403 IX86_BUILTIN_MULPD,
12404 IX86_BUILTIN_MULSD,
12405 IX86_BUILTIN_SUBPD,
12406 IX86_BUILTIN_SUBSD,
12408 IX86_BUILTIN_CMPEQPD,
12409 IX86_BUILTIN_CMPLTPD,
12410 IX86_BUILTIN_CMPLEPD,
12411 IX86_BUILTIN_CMPGTPD,
12412 IX86_BUILTIN_CMPGEPD,
12413 IX86_BUILTIN_CMPNEQPD,
12414 IX86_BUILTIN_CMPNLTPD,
12415 IX86_BUILTIN_CMPNLEPD,
12416 IX86_BUILTIN_CMPNGTPD,
12417 IX86_BUILTIN_CMPNGEPD,
12418 IX86_BUILTIN_CMPORDPD,
12419 IX86_BUILTIN_CMPUNORDPD,
12420 IX86_BUILTIN_CMPNEPD,
12421 IX86_BUILTIN_CMPEQSD,
12422 IX86_BUILTIN_CMPLTSD,
12423 IX86_BUILTIN_CMPLESD,
12424 IX86_BUILTIN_CMPNEQSD,
12425 IX86_BUILTIN_CMPNLTSD,
12426 IX86_BUILTIN_CMPNLESD,
12427 IX86_BUILTIN_CMPORDSD,
12428 IX86_BUILTIN_CMPUNORDSD,
12429 IX86_BUILTIN_CMPNESD,
12431 IX86_BUILTIN_COMIEQSD,
12432 IX86_BUILTIN_COMILTSD,
12433 IX86_BUILTIN_COMILESD,
12434 IX86_BUILTIN_COMIGTSD,
12435 IX86_BUILTIN_COMIGESD,
12436 IX86_BUILTIN_COMINEQSD,
12437 IX86_BUILTIN_UCOMIEQSD,
12438 IX86_BUILTIN_UCOMILTSD,
12439 IX86_BUILTIN_UCOMILESD,
12440 IX86_BUILTIN_UCOMIGTSD,
12441 IX86_BUILTIN_UCOMIGESD,
12442 IX86_BUILTIN_UCOMINEQSD,
12444 IX86_BUILTIN_MAXPD,
12445 IX86_BUILTIN_MAXSD,
12446 IX86_BUILTIN_MINPD,
12447 IX86_BUILTIN_MINSD,
12449 IX86_BUILTIN_ANDPD,
12450 IX86_BUILTIN_ANDNPD,
12452 IX86_BUILTIN_XORPD,
12454 IX86_BUILTIN_SQRTPD,
12455 IX86_BUILTIN_SQRTSD,
12457 IX86_BUILTIN_UNPCKHPD,
12458 IX86_BUILTIN_UNPCKLPD,
12460 IX86_BUILTIN_SHUFPD,
12462 IX86_BUILTIN_LOADUPD,
12463 IX86_BUILTIN_STOREUPD,
12464 IX86_BUILTIN_MOVSD,
12466 IX86_BUILTIN_LOADHPD,
12467 IX86_BUILTIN_LOADLPD,
12469 IX86_BUILTIN_CVTDQ2PD,
12470 IX86_BUILTIN_CVTDQ2PS,
12472 IX86_BUILTIN_CVTPD2DQ,
12473 IX86_BUILTIN_CVTPD2PI,
12474 IX86_BUILTIN_CVTPD2PS,
12475 IX86_BUILTIN_CVTTPD2DQ,
12476 IX86_BUILTIN_CVTTPD2PI,
12478 IX86_BUILTIN_CVTPI2PD,
12479 IX86_BUILTIN_CVTSI2SD,
12480 IX86_BUILTIN_CVTSI642SD,
12482 IX86_BUILTIN_CVTSD2SI,
12483 IX86_BUILTIN_CVTSD2SI64,
12484 IX86_BUILTIN_CVTSD2SS,
12485 IX86_BUILTIN_CVTSS2SD,
12486 IX86_BUILTIN_CVTTSD2SI,
12487 IX86_BUILTIN_CVTTSD2SI64,
12489 IX86_BUILTIN_CVTPS2DQ,
12490 IX86_BUILTIN_CVTPS2PD,
12491 IX86_BUILTIN_CVTTPS2DQ,
12493 IX86_BUILTIN_MOVNTI,
12494 IX86_BUILTIN_MOVNTPD,
12495 IX86_BUILTIN_MOVNTDQ,
12498 IX86_BUILTIN_MASKMOVDQU,
12499 IX86_BUILTIN_MOVMSKPD,
12500 IX86_BUILTIN_PMOVMSKB128,
12501 IX86_BUILTIN_MOVQ2DQ,
12502 IX86_BUILTIN_MOVDQ2Q,
12504 IX86_BUILTIN_PACKSSWB128,
12505 IX86_BUILTIN_PACKSSDW128,
12506 IX86_BUILTIN_PACKUSWB128,
12508 IX86_BUILTIN_PADDB128,
12509 IX86_BUILTIN_PADDW128,
12510 IX86_BUILTIN_PADDD128,
12511 IX86_BUILTIN_PADDQ128,
12512 IX86_BUILTIN_PADDSB128,
12513 IX86_BUILTIN_PADDSW128,
12514 IX86_BUILTIN_PADDUSB128,
12515 IX86_BUILTIN_PADDUSW128,
12516 IX86_BUILTIN_PSUBB128,
12517 IX86_BUILTIN_PSUBW128,
12518 IX86_BUILTIN_PSUBD128,
12519 IX86_BUILTIN_PSUBQ128,
12520 IX86_BUILTIN_PSUBSB128,
12521 IX86_BUILTIN_PSUBSW128,
12522 IX86_BUILTIN_PSUBUSB128,
12523 IX86_BUILTIN_PSUBUSW128,
12525 IX86_BUILTIN_PAND128,
12526 IX86_BUILTIN_PANDN128,
12527 IX86_BUILTIN_POR128,
12528 IX86_BUILTIN_PXOR128,
12530 IX86_BUILTIN_PAVGB128,
12531 IX86_BUILTIN_PAVGW128,
12533 IX86_BUILTIN_PCMPEQB128,
12534 IX86_BUILTIN_PCMPEQW128,
12535 IX86_BUILTIN_PCMPEQD128,
12536 IX86_BUILTIN_PCMPGTB128,
12537 IX86_BUILTIN_PCMPGTW128,
12538 IX86_BUILTIN_PCMPGTD128,
12540 IX86_BUILTIN_PMADDWD128,
12542 IX86_BUILTIN_PMAXSW128,
12543 IX86_BUILTIN_PMAXUB128,
12544 IX86_BUILTIN_PMINSW128,
12545 IX86_BUILTIN_PMINUB128,
12547 IX86_BUILTIN_PMULUDQ,
12548 IX86_BUILTIN_PMULUDQ128,
12549 IX86_BUILTIN_PMULHUW128,
12550 IX86_BUILTIN_PMULHW128,
12551 IX86_BUILTIN_PMULLW128,
12553 IX86_BUILTIN_PSADBW128,
12554 IX86_BUILTIN_PSHUFHW,
12555 IX86_BUILTIN_PSHUFLW,
12556 IX86_BUILTIN_PSHUFD,
12558 IX86_BUILTIN_PSLLW128,
12559 IX86_BUILTIN_PSLLD128,
12560 IX86_BUILTIN_PSLLQ128,
12561 IX86_BUILTIN_PSRAW128,
12562 IX86_BUILTIN_PSRAD128,
12563 IX86_BUILTIN_PSRLW128,
12564 IX86_BUILTIN_PSRLD128,
12565 IX86_BUILTIN_PSRLQ128,
12566 IX86_BUILTIN_PSLLDQI128,
12567 IX86_BUILTIN_PSLLWI128,
12568 IX86_BUILTIN_PSLLDI128,
12569 IX86_BUILTIN_PSLLQI128,
12570 IX86_BUILTIN_PSRAWI128,
12571 IX86_BUILTIN_PSRADI128,
12572 IX86_BUILTIN_PSRLDQI128,
12573 IX86_BUILTIN_PSRLWI128,
12574 IX86_BUILTIN_PSRLDI128,
12575 IX86_BUILTIN_PSRLQI128,
12577 IX86_BUILTIN_PUNPCKHBW128,
12578 IX86_BUILTIN_PUNPCKHWD128,
12579 IX86_BUILTIN_PUNPCKHDQ128,
12580 IX86_BUILTIN_PUNPCKHQDQ128,
12581 IX86_BUILTIN_PUNPCKLBW128,
12582 IX86_BUILTIN_PUNPCKLWD128,
12583 IX86_BUILTIN_PUNPCKLDQ128,
12584 IX86_BUILTIN_PUNPCKLQDQ128,
12586 IX86_BUILTIN_CLFLUSH,
12587 IX86_BUILTIN_MFENCE,
12588 IX86_BUILTIN_LFENCE,
12590 /* Prescott New Instructions. */
12591 IX86_BUILTIN_ADDSUBPS,
12592 IX86_BUILTIN_HADDPS,
12593 IX86_BUILTIN_HSUBPS,
12594 IX86_BUILTIN_MOVSHDUP,
12595 IX86_BUILTIN_MOVSLDUP,
12596 IX86_BUILTIN_ADDSUBPD,
12597 IX86_BUILTIN_HADDPD,
12598 IX86_BUILTIN_HSUBPD,
12599 IX86_BUILTIN_LDDQU,
12601 IX86_BUILTIN_MONITOR,
12602 IX86_BUILTIN_MWAIT,
12604 IX86_BUILTIN_VEC_INIT_V2SI,
12605 IX86_BUILTIN_VEC_INIT_V4HI,
12606 IX86_BUILTIN_VEC_INIT_V8QI,
12607 IX86_BUILTIN_VEC_EXT_V2DF,
12608 IX86_BUILTIN_VEC_EXT_V2DI,
12609 IX86_BUILTIN_VEC_EXT_V4SF,
12610 IX86_BUILTIN_VEC_EXT_V8HI,
12611 IX86_BUILTIN_VEC_EXT_V4HI,
12612 IX86_BUILTIN_VEC_SET_V8HI,
12613 IX86_BUILTIN_VEC_SET_V4HI,
12618 #define def_builtin(MASK, NAME, TYPE, CODE) \
12620 if ((MASK) & target_flags \
12621 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
12622 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
12623 NULL, NULL_TREE); \
12626 /* Bits for builtin_description.flag. */
12628 /* Set when we don't support the comparison natively, and should
12629 swap the comparison operands in order to support it. */
12630 #define BUILTIN_DESC_SWAP_OPERANDS 1
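/* Typical use of the def_builtin macro above (illustrative sketch;
   v4sf_ftype_v4sf_v4sf stands for one of the function-type trees
   built in ix86_init_mmx_sse_builtins below):

       def_builtin (MASK_SSE, "__builtin_ia32_addps",
                    v4sf_ftype_v4sf_v4sf, IX86_BUILTIN_ADDPS);

   The MASK_64BIT bit additionally restricts a builtin to TARGET_64BIT,
   as the condition in the macro shows.  */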
12632 struct builtin_description
12634 const unsigned int mask;
12635 const enum insn_code icode;
12636 const char *const name;
12637 const enum ix86_builtins code;
12638 const enum rtx_code comparison;
12639 const unsigned int flag;
12642 static const struct builtin_description bdesc_comi[] =
12644 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
12645 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
12646 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
12647 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
12648 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
12649 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
12650 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
12651 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
12652 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
12653 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
12654 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
12655 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
12656 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
12657 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
12658 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
12659 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
12660 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
12661 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
12662 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
12663 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
12664 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
12665 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
12666 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
12667 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
12670 static const struct builtin_description bdesc_2arg[] =
12673 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
12674 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
12675 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
12676 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
12677 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
12678 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
12679 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
12680 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
12682 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
12683 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
12684 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
12685 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
12686 BUILTIN_DESC_SWAP_OPERANDS },
12687 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
12688 BUILTIN_DESC_SWAP_OPERANDS },
12689 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
12690 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
12691 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
12692 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
12693 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
12694 BUILTIN_DESC_SWAP_OPERANDS },
12695 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
12696 BUILTIN_DESC_SWAP_OPERANDS },
12697 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
12698 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
12699 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
12700 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
12701 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
12702 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
12703 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
12704 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
12705 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
12706 BUILTIN_DESC_SWAP_OPERANDS },
12707 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
12708 BUILTIN_DESC_SWAP_OPERANDS },
12709 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
12711 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
12712 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
12713 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
12714 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
12716 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
12717 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
12718 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
12719 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
12721 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
12722 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
12723 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
12724 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
12725 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
12728 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
12729 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
12730 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
12731 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
12732 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
12733 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
12734 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
12735 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
12737 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
12738 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
12739 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
12740 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
12741 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
12742 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
12743 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
12744 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
12746 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
12747 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
12748 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
12750 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
12751 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
12752 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
12753 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
12755 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
12756 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
12758 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
12759 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
12760 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
12761 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
12762 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
12763 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
12765 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
12766 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
12767 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
12768 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
12770 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
12771 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
12772 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
12773 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
12774 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
12775 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
12778 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
12779 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
12780 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
12782 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
12783 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
12784 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
12786 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
12787 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
12788 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
12789 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
12790 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
12791 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
12793 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
12794 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
12795 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
12796 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
12797 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
12798 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
12800 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
12801 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
12802 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
12803 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
12805 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
12806 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
12809 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
12810 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
12811 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
12812 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
12813 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
12814 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
12815 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
12816 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
12818 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
12819 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
12820 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
12821 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
12822 BUILTIN_DESC_SWAP_OPERANDS },
12823 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
12824 BUILTIN_DESC_SWAP_OPERANDS },
12825 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
12826 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
12827 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
12828 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
12829 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
12830 BUILTIN_DESC_SWAP_OPERANDS },
12831 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
12832 BUILTIN_DESC_SWAP_OPERANDS },
12833 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
12834 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
12835 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
12836 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
12837 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
12838 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
12839 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
12840 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
12841 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
12843 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
12844 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
12845 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
12846 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
12848 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
12849 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
12850 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
12851 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
12853 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
12854 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
12855 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
12858 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
12859 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
12860 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
12861 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
12862 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
12863 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
12864 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
12865 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
12867 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
12868 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
12869 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
12870 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
12871 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
12872 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
12873 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
12874 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
12876 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
12877 { MASK_SSE2, CODE_FOR_sse2_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
12879 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
12880 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
12881 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
12882 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
12884 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
12885 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
12887 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
12888 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
12889 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
12890 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
12891 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
12892 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
12894 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
12895 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
12896 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
12897 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
12899 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
12900 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
12901 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
12902 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
12903 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
12904 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
12905 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
12906 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
12908 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
12909 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
12910 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
12912 { MASK_SSE2, CODE_FOR_sse2_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
12913 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
12915 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
12916 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
12918 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
12919 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
12920 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
12922 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
12923 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
12924 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
12926 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
12927 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
12929 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
12931 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
12932 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
12933 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
12934 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
12937 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
12938 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
12939 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
12940 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
12941 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
12942 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
12945 static const struct builtin_description bdesc_1arg[] =
12947 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
12948 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
12950 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
12951 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
12952 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
12954 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
12955 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
12956 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
12957 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
12958 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
12959 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
12961 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
12962 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
12964 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
12966 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
12967 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
12969 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
12970 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
12971 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
12972 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
12973 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
12975 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
12977 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
12978 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
12979 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
12980 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
12982 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
12983 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
12984 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
12987 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
12988 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 }
12989 };
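/* Editorial note (not in the original source): each bdesc entry ties an
   insn pattern to a builtin code, so expansion is table-driven. For
   example, pairing CODE_FOR_sqrtv4sf2 with IX86_BUILTIN_SQRTPS above is
   what lets a user-level call such as

     __v4sf r = __builtin_ia32_sqrtps (v);

   go through the generic one-operand expansion path instead of ad hoc
   code per builtin. (__v4sf is the vector typedef from xmmintrin.h.) */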
12991 static void
12992 ix86_init_builtins (void)
12993 {
12994 if (TARGET_MMX)
12995 ix86_init_mmx_sse_builtins ();
12996 }
12998 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
12999 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
13000 portion of the builtins. */
13001 static void
13002 ix86_init_mmx_sse_builtins (void)
13003 {
13004 const struct builtin_description * d;
13005 size_t i;
13007 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
13008 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
13009 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
13010 tree V2DI_type_node
13011 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
13012 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
13013 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
13014 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
13015 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
13016 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
13017 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
13019 tree pchar_type_node = build_pointer_type (char_type_node);
13020 tree pcchar_type_node = build_pointer_type (
13021 build_type_variant (char_type_node, 1, 0));
13022 tree pfloat_type_node = build_pointer_type (float_type_node);
13023 tree pcfloat_type_node = build_pointer_type (
13024 build_type_variant (float_type_node, 1, 0));
13025 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
13026 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
13027 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
13029 /* Comparisons. */
13030 tree int_ftype_v4sf_v4sf
13031 = build_function_type_list (integer_type_node,
13032 V4SF_type_node, V4SF_type_node, NULL_TREE);
13033 tree v4si_ftype_v4sf_v4sf
13034 = build_function_type_list (V4SI_type_node,
13035 V4SF_type_node, V4SF_type_node, NULL_TREE);
13036 /* MMX/SSE/integer conversions. */
13037 tree int_ftype_v4sf
13038 = build_function_type_list (integer_type_node,
13039 V4SF_type_node, NULL_TREE);
13040 tree int64_ftype_v4sf
13041 = build_function_type_list (long_long_integer_type_node,
13042 V4SF_type_node, NULL_TREE);
13043 tree int_ftype_v8qi
13044 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
13045 tree v4sf_ftype_v4sf_int
13046 = build_function_type_list (V4SF_type_node,
13047 V4SF_type_node, integer_type_node, NULL_TREE);
13048 tree v4sf_ftype_v4sf_int64
13049 = build_function_type_list (V4SF_type_node,
13050 V4SF_type_node, long_long_integer_type_node,
13051 NULL_TREE);
13052 tree v4sf_ftype_v4sf_v2si
13053 = build_function_type_list (V4SF_type_node,
13054 V4SF_type_node, V2SI_type_node, NULL_TREE);
13056 /* Miscellaneous. */
13057 tree v8qi_ftype_v4hi_v4hi
13058 = build_function_type_list (V8QI_type_node,
13059 V4HI_type_node, V4HI_type_node, NULL_TREE);
13060 tree v4hi_ftype_v2si_v2si
13061 = build_function_type_list (V4HI_type_node,
13062 V2SI_type_node, V2SI_type_node, NULL_TREE);
13063 tree v4sf_ftype_v4sf_v4sf_int
13064 = build_function_type_list (V4SF_type_node,
13065 V4SF_type_node, V4SF_type_node,
13066 integer_type_node, NULL_TREE);
13067 tree v2si_ftype_v4hi_v4hi
13068 = build_function_type_list (V2SI_type_node,
13069 V4HI_type_node, V4HI_type_node, NULL_TREE);
13070 tree v4hi_ftype_v4hi_int
13071 = build_function_type_list (V4HI_type_node,
13072 V4HI_type_node, integer_type_node, NULL_TREE);
13073 tree v4hi_ftype_v4hi_di
13074 = build_function_type_list (V4HI_type_node,
13075 V4HI_type_node, long_long_unsigned_type_node,
13076 NULL_TREE);
13077 tree v2si_ftype_v2si_di
13078 = build_function_type_list (V2SI_type_node,
13079 V2SI_type_node, long_long_unsigned_type_node,
13080 NULL_TREE);
13081 tree void_ftype_void
13082 = build_function_type (void_type_node, void_list_node);
13083 tree void_ftype_unsigned
13084 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
13085 tree void_ftype_unsigned_unsigned
13086 = build_function_type_list (void_type_node, unsigned_type_node,
13087 unsigned_type_node, NULL_TREE);
13088 tree void_ftype_pcvoid_unsigned_unsigned
13089 = build_function_type_list (void_type_node, const_ptr_type_node,
13090 unsigned_type_node, unsigned_type_node,
13091 NULL_TREE);
13092 tree unsigned_ftype_void
13093 = build_function_type (unsigned_type_node, void_list_node);
13094 tree v2si_ftype_v4sf
13095 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
13096 /* Loads/stores. */
13097 tree void_ftype_v8qi_v8qi_pchar
13098 = build_function_type_list (void_type_node,
13099 V8QI_type_node, V8QI_type_node,
13100 pchar_type_node, NULL_TREE);
13101 tree v4sf_ftype_pcfloat
13102 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
13103 /* @@@ the type is bogus */
13104 tree v4sf_ftype_v4sf_pv2si
13105 = build_function_type_list (V4SF_type_node,
13106 V4SF_type_node, pv2si_type_node, NULL_TREE);
13107 tree void_ftype_pv2si_v4sf
13108 = build_function_type_list (void_type_node,
13109 pv2si_type_node, V4SF_type_node, NULL_TREE);
13110 tree void_ftype_pfloat_v4sf
13111 = build_function_type_list (void_type_node,
13112 pfloat_type_node, V4SF_type_node, NULL_TREE);
13113 tree void_ftype_pdi_di
13114 = build_function_type_list (void_type_node,
13115 pdi_type_node, long_long_unsigned_type_node,
13117 tree void_ftype_pv2di_v2di
13118 = build_function_type_list (void_type_node,
13119 pv2di_type_node, V2DI_type_node, NULL_TREE);
13120 /* Normal vector unops. */
13121 tree v4sf_ftype_v4sf
13122 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
13124 /* Normal vector binops. */
13125 tree v4sf_ftype_v4sf_v4sf
13126 = build_function_type_list (V4SF_type_node,
13127 V4SF_type_node, V4SF_type_node, NULL_TREE);
13128 tree v8qi_ftype_v8qi_v8qi
13129 = build_function_type_list (V8QI_type_node,
13130 V8QI_type_node, V8QI_type_node, NULL_TREE);
13131 tree v4hi_ftype_v4hi_v4hi
13132 = build_function_type_list (V4HI_type_node,
13133 V4HI_type_node, V4HI_type_node, NULL_TREE);
13134 tree v2si_ftype_v2si_v2si
13135 = build_function_type_list (V2SI_type_node,
13136 V2SI_type_node, V2SI_type_node, NULL_TREE);
13137 tree di_ftype_di_di
13138 = build_function_type_list (long_long_unsigned_type_node,
13139 long_long_unsigned_type_node,
13140 long_long_unsigned_type_node, NULL_TREE);
13142 tree v2si_ftype_v2sf
13143 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
13144 tree v2sf_ftype_v2si
13145 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
13146 tree v2si_ftype_v2si
13147 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
13148 tree v2sf_ftype_v2sf
13149 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
13150 tree v2sf_ftype_v2sf_v2sf
13151 = build_function_type_list (V2SF_type_node,
13152 V2SF_type_node, V2SF_type_node, NULL_TREE);
13153 tree v2si_ftype_v2sf_v2sf
13154 = build_function_type_list (V2SI_type_node,
13155 V2SF_type_node, V2SF_type_node, NULL_TREE);
13156 tree pint_type_node = build_pointer_type (integer_type_node);
13157 tree pcint_type_node = build_pointer_type (
13158 build_type_variant (integer_type_node, 1, 0));
13159 tree pdouble_type_node = build_pointer_type (double_type_node);
13160 tree pcdouble_type_node = build_pointer_type (
13161 build_type_variant (double_type_node, 1, 0));
13162 tree int_ftype_v2df_v2df
13163 = build_function_type_list (integer_type_node,
13164 V2DF_type_node, V2DF_type_node, NULL_TREE);
13166 tree ti_ftype_ti_ti
13167 = build_function_type_list (intTI_type_node,
13168 intTI_type_node, intTI_type_node, NULL_TREE);
13169 tree void_ftype_pcvoid
13170 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
13171 tree v2di_ftype_di
13172 = build_function_type_list (V2DI_type_node,
13173 long_long_unsigned_type_node, NULL_TREE);
13174 tree di_ftype_v2di
13175 = build_function_type_list (long_long_unsigned_type_node,
13176 V2DI_type_node, NULL_TREE);
13177 tree v4sf_ftype_v4si
13178 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
13179 tree v4si_ftype_v4sf
13180 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
13181 tree v2df_ftype_v4si
13182 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
13183 tree v4si_ftype_v2df
13184 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
13185 tree v2si_ftype_v2df
13186 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
13187 tree v4sf_ftype_v2df
13188 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
13189 tree v2df_ftype_v2si
13190 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
13191 tree v2df_ftype_v4sf
13192 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
13193 tree int_ftype_v2df
13194 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
13195 tree int64_ftype_v2df
13196 = build_function_type_list (long_long_integer_type_node,
13197 V2DF_type_node, NULL_TREE);
13198 tree v2df_ftype_v2df_int
13199 = build_function_type_list (V2DF_type_node,
13200 V2DF_type_node, integer_type_node, NULL_TREE);
13201 tree v2df_ftype_v2df_int64
13202 = build_function_type_list (V2DF_type_node,
13203 V2DF_type_node, long_long_integer_type_node,
13204 NULL_TREE);
13205 tree v4sf_ftype_v4sf_v2df
13206 = build_function_type_list (V4SF_type_node,
13207 V4SF_type_node, V2DF_type_node, NULL_TREE);
13208 tree v2df_ftype_v2df_v4sf
13209 = build_function_type_list (V2DF_type_node,
13210 V2DF_type_node, V4SF_type_node, NULL_TREE);
13211 tree v2df_ftype_v2df_v2df_int
13212 = build_function_type_list (V2DF_type_node,
13213 V2DF_type_node, V2DF_type_node,
13214 integer_type_node, NULL_TREE);
13216 tree v2df_ftype_v2df_pcdouble
13217 = build_function_type_list (V2DF_type_node,
13218 V2DF_type_node, pcdouble_type_node, NULL_TREE);
13219 tree void_ftype_pdouble_v2df
13220 = build_function_type_list (void_type_node,
13221 pdouble_type_node, V2DF_type_node, NULL_TREE);
13222 tree void_ftype_pint_int
13223 = build_function_type_list (void_type_node,
13224 pint_type_node, integer_type_node, NULL_TREE);
13225 tree void_ftype_v16qi_v16qi_pchar
13226 = build_function_type_list (void_type_node,
13227 V16QI_type_node, V16QI_type_node,
13228 pchar_type_node, NULL_TREE);
13229 tree v2df_ftype_pcdouble
13230 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
13231 tree v2df_ftype_v2df_v2df
13232 = build_function_type_list (V2DF_type_node,
13233 V2DF_type_node, V2DF_type_node, NULL_TREE);
13234 tree v16qi_ftype_v16qi_v16qi
13235 = build_function_type_list (V16QI_type_node,
13236 V16QI_type_node, V16QI_type_node, NULL_TREE);
13237 tree v8hi_ftype_v8hi_v8hi
13238 = build_function_type_list (V8HI_type_node,
13239 V8HI_type_node, V8HI_type_node, NULL_TREE);
13240 tree v4si_ftype_v4si_v4si
13241 = build_function_type_list (V4SI_type_node,
13242 V4SI_type_node, V4SI_type_node, NULL_TREE);
13243 tree v2di_ftype_v2di_v2di
13244 = build_function_type_list (V2DI_type_node,
13245 V2DI_type_node, V2DI_type_node, NULL_TREE);
13246 tree v2di_ftype_v2df_v2df
13247 = build_function_type_list (V2DI_type_node,
13248 V2DF_type_node, V2DF_type_node, NULL_TREE);
13249 tree v2df_ftype_v2df
13250 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
13251 tree v2di_ftype_v2di_int
13252 = build_function_type_list (V2DI_type_node,
13253 V2DI_type_node, integer_type_node, NULL_TREE);
13254 tree v4si_ftype_v4si_int
13255 = build_function_type_list (V4SI_type_node,
13256 V4SI_type_node, integer_type_node, NULL_TREE);
13257 tree v8hi_ftype_v8hi_int
13258 = build_function_type_list (V8HI_type_node,
13259 V8HI_type_node, integer_type_node, NULL_TREE);
13260 tree v8hi_ftype_v8hi_v2di
13261 = build_function_type_list (V8HI_type_node,
13262 V8HI_type_node, V2DI_type_node, NULL_TREE);
13263 tree v4si_ftype_v4si_v2di
13264 = build_function_type_list (V4SI_type_node,
13265 V4SI_type_node, V2DI_type_node, NULL_TREE);
13266 tree v4si_ftype_v8hi_v8hi
13267 = build_function_type_list (V4SI_type_node,
13268 V8HI_type_node, V8HI_type_node, NULL_TREE);
13269 tree di_ftype_v8qi_v8qi
13270 = build_function_type_list (long_long_unsigned_type_node,
13271 V8QI_type_node, V8QI_type_node, NULL_TREE);
13272 tree di_ftype_v2si_v2si
13273 = build_function_type_list (long_long_unsigned_type_node,
13274 V2SI_type_node, V2SI_type_node, NULL_TREE);
13275 tree v2di_ftype_v16qi_v16qi
13276 = build_function_type_list (V2DI_type_node,
13277 V16QI_type_node, V16QI_type_node, NULL_TREE);
13278 tree v2di_ftype_v4si_v4si
13279 = build_function_type_list (V2DI_type_node,
13280 V4SI_type_node, V4SI_type_node, NULL_TREE);
13281 tree int_ftype_v16qi
13282 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
13283 tree v16qi_ftype_pcchar
13284 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
13285 tree void_ftype_pchar_v16qi
13286 = build_function_type_list (void_type_node,
13287 pchar_type_node, V16QI_type_node, NULL_TREE);
13288 tree v4si_ftype_pcint
13289 = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
13290 tree void_ftype_pcint_v4si
13291 = build_function_type_list (void_type_node,
13292 pcint_type_node, V4SI_type_node, NULL_TREE);
13293 tree v2di_ftype_v2di
13294 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
13296 tree float80_type;
13297 tree float128_type;
13298 tree ftype;
13300 /* The __float80 type. */
13301 if (TYPE_MODE (long_double_type_node) == XFmode)
13302 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
13303 "__float80");
13304 else
13305 {
13306 /* The __float80 type. */
13307 float80_type = make_node (REAL_TYPE);
13308 TYPE_PRECISION (float80_type) = 80;
13309 layout_type (float80_type);
13310 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
13311 }
13313 float128_type = make_node (REAL_TYPE);
13314 TYPE_PRECISION (float128_type) = 128;
13315 layout_type (float128_type);
13316 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
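/* Editorial sketch (not part of the original source): once registered,
   the extended types are directly nameable from C on capable targets,
   e.g.

     __float80 e = 1.0L;
     __float128 q;

   Illustrative only; the exact layout follows the long double
   configuration handled above. */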
13318 /* Add all builtins that are more or less simple operations on two
13319 operands. */
13320 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13321 {
13322 /* Use one of the operands; the target can have a different mode for
13323 mask-generating compares. */
13324 enum machine_mode mode;
13325 tree type;
13327 if (d->name == 0)
13328 continue;
13329 mode = insn_data[d->icode].operand[1].mode;
13331 switch (mode)
13332 {
13333 case V16QImode:
13334 type = v16qi_ftype_v16qi_v16qi;
13335 break;
13336 case V8HImode:
13337 type = v8hi_ftype_v8hi_v8hi;
13338 break;
13339 case V4SImode:
13340 type = v4si_ftype_v4si_v4si;
13341 break;
13342 case V2DImode:
13343 type = v2di_ftype_v2di_v2di;
13344 break;
13345 case V2DFmode:
13346 type = v2df_ftype_v2df_v2df;
13347 break;
13348 case TImode:
13349 type = ti_ftype_ti_ti;
13350 break;
13351 case V4SFmode:
13352 type = v4sf_ftype_v4sf_v4sf;
13353 break;
13354 case V8QImode:
13355 type = v8qi_ftype_v8qi_v8qi;
13356 break;
13357 case V4HImode:
13358 type = v4hi_ftype_v4hi_v4hi;
13359 break;
13360 case V2SImode:
13361 type = v2si_ftype_v2si_v2si;
13362 break;
13363 case DImode:
13364 type = di_ftype_di_di;
13365 break;
13367 default:
13368 gcc_unreachable ();
13369 }
13371 /* Override for comparisons. */
13372 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
13373 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
13374 type = v4si_ftype_v4sf_v4sf;
13376 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
13377 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
13378 type = v2di_ftype_v2df_v2df;
13380 def_builtin (d->mask, d->name, type, d->code);
13381 }
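/* Editorial sketch: after this loop, every named two-operand builtin is
   callable with the signature chosen above, e.g.

     __v8hi a, b;
     __v8hi r = __builtin_ia32_paddw128 (a, b);

   (__v8hi is the typedef used by emmintrin.h; shown as an illustration
   of the mapping, not verbatim header code.) */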
13383 /* Add the remaining MMX insns with somewhat more complicated types. */
13384 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
13385 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
13386 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
13387 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
13389 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
13390 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
13391 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
13393 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
13394 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
13396 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
13397 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
13399 /* comi/ucomi insns. */
13400 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
13401 if (d->mask == MASK_SSE2)
13402 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
13403 else
13404 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
13406 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
13407 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
13408 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
13410 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
13411 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
13412 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
13413 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
13414 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
13415 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
13416 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
13417 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
13418 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
13419 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
13420 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
13422 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
13424 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
13425 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
13427 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
13428 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
13429 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
13430 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
13432 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
13433 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
13434 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
13435 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
13437 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
13439 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
13441 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
13442 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
13443 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
13444 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
13445 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
13446 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
13448 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
13450 /* Original 3DNow! */
13451 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
13452 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
13453 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
13454 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
13455 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
13456 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
13457 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
13458 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
13459 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
13460 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
13461 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
13462 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
13463 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
13464 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
13465 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
13466 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
13467 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
13468 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
13469 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
13470 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
13472 /* 3DNow! extension as used in the Athlon CPU. */
13473 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
13474 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
13475 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
13476 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
13477 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
13478 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
13481 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
13482 def_builtin (MASK_SSE2, "__builtin_ia32_movq2dq", v2di_ftype_di, IX86_BUILTIN_MOVQ2DQ);
13483 def_builtin (MASK_SSE2, "__builtin_ia32_movdq2q", di_ftype_v2di, IX86_BUILTIN_MOVDQ2Q);
13485 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
13486 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
13488 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
13489 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
13491 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
13492 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
13493 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
13494 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
13495 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
13497 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
13498 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
13499 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
13500 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
13502 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
13503 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
13505 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
13507 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
13508 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
13510 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
13511 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
13512 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
13513 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
13514 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
13516 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
13518 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
13519 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
13520 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
13521 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
13523 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
13524 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
13525 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
13527 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
13528 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
13529 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
13530 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
13532 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
13533 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
13534 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
13536 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
13537 def_builtin (MASK_SSE2, "__builtin_ia32_loadd", v4si_ftype_pcint, IX86_BUILTIN_LOADD);
13538 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
13539 def_builtin (MASK_SSE2, "__builtin_ia32_stored", void_ftype_pcint_v4si, IX86_BUILTIN_STORED);
13540 def_builtin (MASK_SSE2, "__builtin_ia32_movq", v2di_ftype_v2di, IX86_BUILTIN_MOVQ);
13542 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
13543 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
13545 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
13546 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
13547 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
13549 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
13550 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
13551 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
13553 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
13554 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
13556 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
13557 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
13558 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
13559 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
13561 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
13562 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
13563 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
13564 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
13566 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
13567 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
13569 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
13571 /* Prescott New Instructions. */
13572 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
13573 void_ftype_pcvoid_unsigned_unsigned,
13574 IX86_BUILTIN_MONITOR);
13575 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
13576 void_ftype_unsigned_unsigned,
13577 IX86_BUILTIN_MWAIT);
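/* Editorial sketch: pmmintrin.h reaches these roughly as

     _mm_monitor (p, ext, hints)  ->  __builtin_ia32_monitor (p, ext, hints)
     _mm_mwait (ext, hints)       ->  __builtin_ia32_mwait (ext, hints)

   matching the operand order expected by the expanders further down. */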
13578 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
13580 IX86_BUILTIN_MOVSHDUP);
13581 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
13583 IX86_BUILTIN_MOVSLDUP);
13584 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
13585 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
13587 /* Access to the vec_init patterns. */
13588 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
13589 integer_type_node, NULL_TREE);
13590 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
13591 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
13593 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
13594 short_integer_type_node,
13595 short_integer_type_node,
13596 short_integer_type_node, NULL_TREE);
13597 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
13598 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
13600 ftype = build_function_type_list (V8QI_type_node, char_type_node,
13601 char_type_node, char_type_node,
13602 char_type_node, char_type_node,
13603 char_type_node, char_type_node,
13604 char_type_node, NULL_TREE);
13605 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
13606 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
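/* Editorial sketch: these constructors let mmintrin.h build __m64
   values without vec_init patterns in mmx.md, e.g. roughly

     _mm_set_pi32 (i1, i0)  ->  __builtin_ia32_vec_init_v2si (i0, i1)

   ix86_expand_vec_init_builtin below does the actual element packing. */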
13608 /* Access to the vec_extract patterns. */
13609 ftype = build_function_type_list (double_type_node, V2DF_type_node,
13610 integer_type_node, NULL_TREE);
13611 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
13612 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
13614 ftype = build_function_type_list (long_long_integer_type_node,
13615 V2DI_type_node, integer_type_node,
13616 NULL_TREE);
13617 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
13618 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
13620 ftype = build_function_type_list (float_type_node, V4SF_type_node,
13621 integer_type_node, NULL_TREE);
13622 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
13623 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
13625 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
13626 integer_type_node, NULL_TREE);
13627 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
13628 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
13630 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
13631 integer_type_node, NULL_TREE);
13632 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
13633 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
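/* Editorial sketch: element reads go through these builtins rather than
   through vec_extract patterns, e.g.

     float f = __builtin_ia32_vec_ext_v4sf (v, 0);

   The selector must be a compile-time constant; get_element_number
   below enforces that. */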
13635 /* Access to the vec_set patterns. */
13636 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
13637 intHI_type_node,
13638 integer_type_node, NULL_TREE);
13639 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
13640 ftype, IX86_BUILTIN_VEC_SET_V8HI);
13642 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
13643 intHI_type_node,
13644 integer_type_node, NULL_TREE);
13645 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
13646 ftype, IX86_BUILTIN_VEC_SET_V4HI);
13647 }
13649 /* Errors in the source file can cause expand_expr to return const0_rtx
13650 where we expect a vector. To avoid crashing, use one of the vector
13651 clear instructions. */
13652 static rtx
13653 safe_vector_operand (rtx x, enum machine_mode mode)
13654 {
13655 if (x == const0_rtx)
13656 x = CONST0_RTX (mode);
13657 return x;
13658 }
13660 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
13662 static rtx
13663 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
13664 {
13665 rtx pat, xops[3];
13666 tree arg0 = TREE_VALUE (arglist);
13667 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13668 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13669 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13670 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13671 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13672 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13674 if (VECTOR_MODE_P (mode0))
13675 op0 = safe_vector_operand (op0, mode0);
13676 if (VECTOR_MODE_P (mode1))
13677 op1 = safe_vector_operand (op1, mode1);
13679 if (optimize || !target
13680 || GET_MODE (target) != tmode
13681 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13682 target = gen_reg_rtx (tmode);
13684 if (GET_MODE (op1) == SImode && mode1 == TImode)
13685 {
13686 rtx x = gen_reg_rtx (V4SImode);
13687 emit_insn (gen_sse2_loadd (x, op1));
13688 op1 = gen_lowpart (TImode, x);
13689 }
13691 /* In case the insn wants input operands in modes different from
13692 the result, abort. */
13693 if ((GET_MODE (op0) != mode0 && GET_MODE (op0) != VOIDmode)
13694 || (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode))
13695 abort ();
13697 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
13698 op0 = copy_to_mode_reg (mode0, op0);
13699 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
13700 op1 = copy_to_mode_reg (mode1, op1);
13702 /* ??? Using ix86_fixup_binary_operands is problematic when
13703 we've got mismatched modes. Fake it. */
13705 xops[0] = target;
13706 xops[1] = op0;
13707 xops[2] = op1;
13709 if (tmode == mode0 && tmode == mode1)
13710 {
13711 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
13712 op0 = xops[1];
13713 op1 = xops[2];
13714 }
13715 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
13716 {
13717 op0 = force_reg (mode0, op0);
13718 op1 = force_reg (mode1, op1);
13719 target = gen_reg_rtx (tmode);
13720 }
13722 pat = GEN_FCN (icode) (target, op0, op1);
13723 if (! pat)
13724 return 0;
13725 emit_insn (pat);
13726 return target;
13727 }
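/* Editorial note: schematically, expanding a bdesc_2arg builtin such as
   __builtin_ia32_addps (a, b) through the routine above emits RTL of
   the shape

     (set (reg:V4SF target) (plus:V4SF (reg:V4SF op0) (reg:V4SF op1)))

   with operands coerced to whatever the named pattern's predicates
   demand (a sketch, not a verbatim RTL dump). */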
13729 /* Subroutine of ix86_expand_builtin to take care of stores. */
13731 static rtx
13732 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
13733 {
13734 rtx pat;
13735 tree arg0 = TREE_VALUE (arglist);
13736 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13737 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13738 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13739 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
13740 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
13742 if (VECTOR_MODE_P (mode1))
13743 op1 = safe_vector_operand (op1, mode1);
13745 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13746 op1 = copy_to_mode_reg (mode1, op1);
13748 pat = GEN_FCN (icode) (op0, op1);
13749 if (pat)
13750 emit_insn (pat);
13751 return 0;
13752 }
13754 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
13756 static rtx
13757 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
13758 rtx target, int do_load)
13759 {
13760 rtx pat;
13761 tree arg0 = TREE_VALUE (arglist);
13762 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13763 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13764 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13766 if (optimize || !target
13767 || GET_MODE (target) != tmode
13768 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13769 target = gen_reg_rtx (tmode);
13770 if (do_load)
13771 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13772 else
13773 {
13774 if (VECTOR_MODE_P (mode0))
13775 op0 = safe_vector_operand (op0, mode0);
13777 if ((optimize && !register_operand (op0, mode0))
13778 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13779 op0 = copy_to_mode_reg (mode0, op0);
13780 }
13782 pat = GEN_FCN (icode) (target, op0);
13783 if (! pat)
13784 return 0;
13785 emit_insn (pat);
13786 return target;
13787 }
13789 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
13790 sqrtss, rsqrtss, rcpss. */
13792 static rtx
13793 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
13794 {
13795 rtx pat;
13796 tree arg0 = TREE_VALUE (arglist);
13797 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13798 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13799 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13801 if (optimize || !target
13802 || GET_MODE (target) != tmode
13803 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13804 target = gen_reg_rtx (tmode);
13806 if (VECTOR_MODE_P (mode0))
13807 op0 = safe_vector_operand (op0, mode0);
13809 if ((optimize && !register_operand (op0, mode0))
13810 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13811 op0 = copy_to_mode_reg (mode0, op0);
13813 op1 = op0;
13814 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
13815 op1 = copy_to_mode_reg (mode0, op1);
13817 pat = GEN_FCN (icode) (target, op0, op1);
13818 if (! pat)
13819 return 0;
13820 emit_insn (pat);
13821 return target;
13822 }
13824 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
13826 static rtx
13827 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
13828 rtx target)
13829 {
13830 rtx pat;
13831 tree arg0 = TREE_VALUE (arglist);
13832 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13833 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13834 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13835 rtx op2;
13836 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
13837 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
13838 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
13839 enum rtx_code comparison = d->comparison;
13841 if (VECTOR_MODE_P (mode0))
13842 op0 = safe_vector_operand (op0, mode0);
13843 if (VECTOR_MODE_P (mode1))
13844 op1 = safe_vector_operand (op1, mode1);
13846 /* Swap operands if we have a comparison that isn't available in
13847 hardware. */
13848 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
13849 {
13850 rtx tmp = gen_reg_rtx (mode1);
13851 emit_move_insn (tmp, op1);
13852 op1 = op0;
13853 op0 = tmp;
13854 }
13856 if (optimize || !target
13857 || GET_MODE (target) != tmode
13858 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
13859 target = gen_reg_rtx (tmode);
13861 if ((optimize && !register_operand (op0, mode0))
13862 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
13863 op0 = copy_to_mode_reg (mode0, op0);
13864 if ((optimize && !register_operand (op1, mode1))
13865 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
13866 op1 = copy_to_mode_reg (mode1, op1);
13868 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13869 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
13870 if (! pat)
13871 return 0;
13872 emit_insn (pat);
13873 return target;
13874 }
13876 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
13878 static rtx
13879 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
13880 rtx target)
13881 {
13882 rtx pat;
13883 tree arg0 = TREE_VALUE (arglist);
13884 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13885 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13886 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13887 rtx op2;
13888 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
13889 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
13890 enum rtx_code comparison = d->comparison;
13892 if (VECTOR_MODE_P (mode0))
13893 op0 = safe_vector_operand (op0, mode0);
13894 if (VECTOR_MODE_P (mode1))
13895 op1 = safe_vector_operand (op1, mode1);
13897 /* Swap operands if we have a comparison that isn't available in
13898 hardware. */
13899 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
13900 {
13901 rtx tmp = op1;
13902 op1 = op0;
13903 op0 = tmp;
13904 }
13906 target = gen_reg_rtx (SImode);
13907 emit_move_insn (target, const0_rtx);
13908 target = gen_rtx_SUBREG (QImode, target, 0);
13910 if ((optimize && !register_operand (op0, mode0))
13911 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
13912 op0 = copy_to_mode_reg (mode0, op0);
13913 if ((optimize && !register_operand (op1, mode1))
13914 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
13915 op1 = copy_to_mode_reg (mode1, op1);
13917 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13918 pat = GEN_FCN (d->icode) (op0, op1);
13919 if (! pat)
13920 return 0;
13921 emit_insn (pat);
13922 emit_insn (gen_rtx_SET (VOIDmode,
13923 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
13924 gen_rtx_fmt_ee (comparison, QImode,
13925 SET_DEST (pat),
13926 const0_rtx)));
13928 return SUBREG_REG (target);
13929 }
13931 /* Return the integer constant in ARG. Constrain it to be in the range
13932 of the subparts of VEC_TYPE; issue an error if not. */
13934 static unsigned HOST_WIDE_INT
13935 get_element_number (tree vec_type, tree arg)
13936 {
13937 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
13939 if (!host_integerp (arg, 1)
13940 || (elt = tree_low_cst (arg, 1), elt > max))
13941 {
13942 error ("selector must be an integer constant in the range 0..%i", max);
13943 return 0;
13944 }
13946 return elt;
13947 }
13949 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
13950 ix86_expand_vector_init. We DO have language-level syntax for this, in
13951 the form of (type){ init-list }. Except that since we can't place emms
13952 instructions from inside the compiler, we can't allow the use of MMX
13953 registers unless the user explicitly asks for it. So we do *not* define
13954 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
13955 we have builtins invoked by mmintrin.h that give us license to emit
13956 these sorts of instructions. */
13958 static rtx
13959 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
13960 {
13961 enum machine_mode tmode = TYPE_MODE (type);
13962 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
13963 int i, n_elt = GET_MODE_NUNITS (tmode);
13964 rtvec v = rtvec_alloc (n_elt);
13966 gcc_assert (VECTOR_MODE_P (tmode));
13968 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
13969 {
13970 rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
13971 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
13972 }
13974 gcc_assert (arglist == NULL);
13976 if (!target || !register_operand (target, tmode))
13977 target = gen_reg_rtx (tmode);
13979 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
13980 return target;
13981 }
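/* Editorial sketch: with this expander, mmintrin.h can define its
   constructors as plain builtin calls, e.g. approximately

     extern __inline __m64
     _mm_set_pi16 (short w3, short w2, short w1, short w0)
     {
       return (__m64) __builtin_ia32_vec_init_v4hi (w0, w1, w2, w3);
     }

   (argument order per the header convention; an approximation, not the
   verbatim header). */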
13983 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
13984 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
13985 had a language-level syntax for referencing vector elements. */
13987 static rtx
13988 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
13989 {
13990 enum machine_mode tmode, mode0;
13991 tree arg0, arg1;
13992 rtx op0;
13993 int elt;
13995 arg0 = TREE_VALUE (arglist);
13996 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13998 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13999 elt = get_element_number (TREE_TYPE (arg0), arg1);
14001 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14002 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14003 gcc_assert (VECTOR_MODE_P (mode0));
14005 op0 = force_reg (mode0, op0);
14007 if (optimize || !target || !register_operand (target, tmode))
14008 target = gen_reg_rtx (tmode);
14010 ix86_expand_vector_extract (true, target, op0, elt);
14012 return target;
14013 }
14015 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
14016 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
14017 a language-level syntax for referencing vector elements. */
14019 static rtx
14020 ix86_expand_vec_set_builtin (tree arglist)
14021 {
14022 enum machine_mode tmode, mode1;
14023 tree arg0, arg1, arg2;
14024 rtx op0, op1;
14025 int elt;
14027 arg0 = TREE_VALUE (arglist);
14028 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14029 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14031 tmode = TYPE_MODE (TREE_TYPE (arg0));
14032 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14033 gcc_assert (VECTOR_MODE_P (tmode));
14035 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
14036 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
14037 elt = get_element_number (TREE_TYPE (arg0), arg2);
14039 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14040 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14042 op0 = force_reg (tmode, op0);
14043 op1 = force_reg (mode1, op1);
14045 ix86_expand_vector_set (true, op0, op1, elt);
14047 return op0;
14048 }
14050 /* Expand an expression EXP that calls a built-in function,
14051 with result going to TARGET if that's convenient
14052 (and in mode MODE if that's convenient).
14053 SUBTARGET may be used as the target for computing one of EXP's operands.
14054 IGNORE is nonzero if the value is to be ignored. */
14056 static rtx
14057 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
14058 enum machine_mode mode ATTRIBUTE_UNUSED,
14059 int ignore ATTRIBUTE_UNUSED)
14060 {
14061 const struct builtin_description *d;
14062 size_t i;
14063 enum insn_code icode;
14064 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
14065 tree arglist = TREE_OPERAND (exp, 1);
14066 tree arg0, arg1, arg2;
14067 rtx op0, op1, op2, pat;
14068 enum machine_mode tmode, mode0, mode1, mode2;
14069 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14071 switch (fcode)
14072 {
14073 case IX86_BUILTIN_EMMS:
14074 emit_insn (gen_mmx_emms ());
14075 return 0;
14077 case IX86_BUILTIN_SFENCE:
14078 emit_insn (gen_sse_sfence ());
14079 return 0;
14081 case IX86_BUILTIN_MASKMOVQ:
14082 case IX86_BUILTIN_MASKMOVDQU:
14083 icode = (fcode == IX86_BUILTIN_MASKMOVQ
14084 ? CODE_FOR_mmx_maskmovq
14085 : CODE_FOR_sse2_maskmovdqu);
14086 /* Note the arg order is different from the operand order. */
14087 arg1 = TREE_VALUE (arglist);
14088 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
14089 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14090 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14091 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14092 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14093 mode0 = insn_data[icode].operand[0].mode;
14094 mode1 = insn_data[icode].operand[1].mode;
14095 mode2 = insn_data[icode].operand[2].mode;
14097 op0 = force_reg (Pmode, op0);
14098 op0 = gen_rtx_MEM (mode1, op0);
14100 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14101 op0 = copy_to_mode_reg (mode0, op0);
14102 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14103 op1 = copy_to_mode_reg (mode1, op1);
14104 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
14105 op2 = copy_to_mode_reg (mode2, op2);
14106 pat = GEN_FCN (icode) (op0, op1, op2);
14107 if (! pat)
14108 return 0;
14109 emit_insn (pat);
14110 return 0;
14112 case IX86_BUILTIN_SQRTSS:
14113 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
14114 case IX86_BUILTIN_RSQRTSS:
14115 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
14116 case IX86_BUILTIN_RCPSS:
14117 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
14119 case IX86_BUILTIN_LOADUPS:
14120 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
14122 case IX86_BUILTIN_STOREUPS:
14123 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
14125 case IX86_BUILTIN_LOADHPS:
14126 case IX86_BUILTIN_LOADLPS:
14127 case IX86_BUILTIN_LOADHPD:
14128 case IX86_BUILTIN_LOADLPD:
14129 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
14130 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
14131 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
14132 : CODE_FOR_sse2_loadlpd);
14133 arg0 = TREE_VALUE (arglist);
14134 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14135 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14136 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14137 tmode = insn_data[icode].operand[0].mode;
14138 mode0 = insn_data[icode].operand[1].mode;
14139 mode1 = insn_data[icode].operand[2].mode;
14141 op0 = force_reg (mode0, op0);
14142 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
14143 if (optimize || target == 0
14144 || GET_MODE (target) != tmode
14145 || !register_operand (target, tmode))
14146 target = gen_reg_rtx (tmode);
14147 pat = GEN_FCN (icode) (target, op0, op1);
14148 if (! pat)
14149 return 0;
14150 emit_insn (pat);
14151 return target;
14153 case IX86_BUILTIN_STOREHPS:
14154 case IX86_BUILTIN_STORELPS:
14155 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
14156 : CODE_FOR_sse_storelps);
14157 arg0 = TREE_VALUE (arglist);
14158 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14159 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14160 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14161 mode0 = insn_data[icode].operand[0].mode;
14162 mode1 = insn_data[icode].operand[1].mode;
14164 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14165 op1 = force_reg (mode1, op1);
14167 pat = GEN_FCN (icode) (op0, op1);
14168 if (! pat)
14169 return const0_rtx;
14170 emit_insn (pat);
14171 return const0_rtx;
14173 case IX86_BUILTIN_MOVNTPS:
14174 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
14175 case IX86_BUILTIN_MOVNTQ:
14176 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
14178 case IX86_BUILTIN_LDMXCSR:
14179 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
14180 target = assign_386_stack_local (SImode, 0);
14181 emit_move_insn (target, op0);
14182 emit_insn (gen_sse_ldmxcsr (target));
14183 return 0;
14185 case IX86_BUILTIN_STMXCSR:
14186 target = assign_386_stack_local (SImode, 0);
14187 emit_insn (gen_sse_stmxcsr (target));
14188 return copy_to_mode_reg (SImode, target);
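/* Editorial sketch: together these two cases implement MXCSR access,
   e.g. setting flush-to-zero (bit 15 of MXCSR):

     unsigned int csr = __builtin_ia32_stmxcsr ();
     __builtin_ia32_ldmxcsr (csr | 0x8000);

   (illustrative; _mm_getcsr/_mm_setcsr in xmmintrin.h wrap these). */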
14190 case IX86_BUILTIN_SHUFPS:
14191 case IX86_BUILTIN_SHUFPD:
14192 icode = (fcode == IX86_BUILTIN_SHUFPS
14193 ? CODE_FOR_sse_shufps
14194 : CODE_FOR_sse2_shufpd);
14195 arg0 = TREE_VALUE (arglist);
14196 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14197 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14198 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14199 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14200 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14201 tmode = insn_data[icode].operand[0].mode;
14202 mode0 = insn_data[icode].operand[1].mode;
14203 mode1 = insn_data[icode].operand[2].mode;
14204 mode2 = insn_data[icode].operand[3].mode;
14206 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14207 op0 = copy_to_mode_reg (mode0, op0);
14208 if ((optimize && !register_operand (op1, mode1))
14209 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
14210 op1 = copy_to_mode_reg (mode1, op1);
14211 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14212 {
14213 /* @@@ better error message */
14214 error ("mask must be an immediate");
14215 return gen_reg_rtx (tmode);
14216 }
14217 if (optimize || target == 0
14218 || GET_MODE (target) != tmode
14219 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14220 target = gen_reg_rtx (tmode);
14221 pat = GEN_FCN (icode) (target, op0, op1, op2);
14222 if (! pat)
14223 return 0;
14224 emit_insn (pat);
14225 return target;
14227 case IX86_BUILTIN_PSHUFW:
14228 case IX86_BUILTIN_PSHUFD:
14229 case IX86_BUILTIN_PSHUFHW:
14230 case IX86_BUILTIN_PSHUFLW:
14231 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
14232 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
14233 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
14234 : CODE_FOR_mmx_pshufw);
14235 arg0 = TREE_VALUE (arglist);
14236 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14237 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14238 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14239 tmode = insn_data[icode].operand[0].mode;
14240 mode1 = insn_data[icode].operand[1].mode;
14241 mode2 = insn_data[icode].operand[2].mode;
14243 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14244 op0 = copy_to_mode_reg (mode1, op0);
14245 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14246 {
14247 /* @@@ better error message */
14248 error ("mask must be an immediate");
14249 return const0_rtx;
14250 }
14251 if (target == 0
14252 || GET_MODE (target) != tmode
14253 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14254 target = gen_reg_rtx (tmode);
14255 pat = GEN_FCN (icode) (target, op0, op1);
14261 case IX86_BUILTIN_PSLLDQI128:
14262 case IX86_BUILTIN_PSRLDQI128:
14263 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
14264 : CODE_FOR_sse2_lshrti3);
14265 arg0 = TREE_VALUE (arglist);
14266 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14267 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14268 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14269 tmode = insn_data[icode].operand[0].mode;
14270 mode1 = insn_data[icode].operand[1].mode;
14271 mode2 = insn_data[icode].operand[2].mode;
14273 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14274 {
14275 op0 = copy_to_reg (op0);
14276 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
14277 }
14278 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14280 error ("shift must be an immediate");
14283 target = gen_reg_rtx (V2DImode);
14284 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
14285 if (! pat)
14286 return 0;
14287 emit_insn (pat);
14288 return target;
14290 case IX86_BUILTIN_FEMMS:
14291 emit_insn (gen_mmx_femms ());
14292 return 0;
14294 case IX86_BUILTIN_PAVGUSB:
14295 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
14297 case IX86_BUILTIN_PF2ID:
14298 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
14300 case IX86_BUILTIN_PFACC:
14301 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
14303 case IX86_BUILTIN_PFADD:
14304 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
14306 case IX86_BUILTIN_PFCMPEQ:
14307 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
14309 case IX86_BUILTIN_PFCMPGE:
14310 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
14312 case IX86_BUILTIN_PFCMPGT:
14313 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
14315 case IX86_BUILTIN_PFMAX:
14316 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
14318 case IX86_BUILTIN_PFMIN:
14319 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
14321 case IX86_BUILTIN_PFMUL:
14322 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
14324 case IX86_BUILTIN_PFRCP:
14325 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
14327 case IX86_BUILTIN_PFRCPIT1:
14328 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
14330 case IX86_BUILTIN_PFRCPIT2:
14331 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
14333 case IX86_BUILTIN_PFRSQIT1:
14334 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
14336 case IX86_BUILTIN_PFRSQRT:
14337 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
14339 case IX86_BUILTIN_PFSUB:
14340 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
14342 case IX86_BUILTIN_PFSUBR:
14343 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
14345 case IX86_BUILTIN_PI2FD:
14346 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
14348 case IX86_BUILTIN_PMULHRW:
14349 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
14351 case IX86_BUILTIN_PF2IW:
14352 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
14354 case IX86_BUILTIN_PFNACC:
14355 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
14357 case IX86_BUILTIN_PFPNACC:
14358 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
14360 case IX86_BUILTIN_PI2FW:
14361 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
14363 case IX86_BUILTIN_PSWAPDSI:
14364 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
14366 case IX86_BUILTIN_PSWAPDSF:
14367 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
14369 case IX86_BUILTIN_SQRTSD:
14370 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
14371 case IX86_BUILTIN_LOADUPD:
14372 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
14373 case IX86_BUILTIN_STOREUPD:
14374 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
14376 case IX86_BUILTIN_MFENCE:
14377 emit_insn (gen_sse2_mfence ());
14379 case IX86_BUILTIN_LFENCE:
14380 emit_insn (gen_sse2_lfence ());
14383 case IX86_BUILTIN_CLFLUSH:
14384 arg0 = TREE_VALUE (arglist);
14385 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14386 icode = CODE_FOR_sse2_clflush;
14387 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
14388 op0 = copy_to_mode_reg (Pmode, op0);
14390 emit_insn (gen_sse2_clflush (op0));
14393 case IX86_BUILTIN_MOVNTPD:
14394 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
14395 case IX86_BUILTIN_MOVNTDQ:
14396 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
14397 case IX86_BUILTIN_MOVNTI:
14398 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
14400 case IX86_BUILTIN_LOADDQU:
14401 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
14402 case IX86_BUILTIN_LOADD:
14403 return ix86_expand_unop_builtin (CODE_FOR_sse2_loadd, arglist, target, 1);
14405 case IX86_BUILTIN_STOREDQU:
14406 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
14407 case IX86_BUILTIN_STORED:
14408 return ix86_expand_store_builtin (CODE_FOR_sse2_stored, arglist);
14410 case IX86_BUILTIN_MONITOR:
14411 arg0 = TREE_VALUE (arglist);
14412 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14413 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14414 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14415 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14416 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14418 op0 = copy_to_mode_reg (SImode, op0);
14420 op1 = copy_to_mode_reg (SImode, op1);
14422 op2 = copy_to_mode_reg (SImode, op2);
14423 emit_insn (gen_sse3_monitor (op0, op1, op2));
14426 case IX86_BUILTIN_MWAIT:
14427 arg0 = TREE_VALUE (arglist);
14428 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14429 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14430 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14432 op0 = copy_to_mode_reg (SImode, op0);
14434 op1 = copy_to_mode_reg (SImode, op1);
14435 emit_insn (gen_sse3_mwait (op0, op1));
14438 case IX86_BUILTIN_LDDQU:
14439 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist, target, 1);
14442 case IX86_BUILTIN_VEC_INIT_V2SI:
14443 case IX86_BUILTIN_VEC_INIT_V4HI:
14444 case IX86_BUILTIN_VEC_INIT_V8QI:
14445 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
14447 case IX86_BUILTIN_VEC_EXT_V2DF:
14448 case IX86_BUILTIN_VEC_EXT_V2DI:
14449 case IX86_BUILTIN_VEC_EXT_V4SF:
14450 case IX86_BUILTIN_VEC_EXT_V8HI:
14451 case IX86_BUILTIN_VEC_EXT_V4HI:
14452 return ix86_expand_vec_ext_builtin (arglist, target);
14454 case IX86_BUILTIN_VEC_SET_V8HI:
14455 case IX86_BUILTIN_VEC_SET_V4HI:
14456 return ix86_expand_vec_set_builtin (arglist);
14462 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14463 if (d->code == fcode)
14465 /* Compares are treated specially. */
14466 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
14467 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
14468 || d->icode == CODE_FOR_sse2_maskcmpv2df3
14469 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
14470 return ix86_expand_sse_compare (d, arglist, target);
14472 return ix86_expand_binop_builtin (d->icode, arglist, target);
14475 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
14476 if (d->code == fcode)
14477 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
14479 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
14480 if (d->code == fcode)
14481 return ix86_expand_sse_comi (d, arglist, target);
14483 /* @@@ Should really do something sensible here. */
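/* Editor's illustrative sketch (not part of this file): the loops above
   dispatch from a builtin function code to an insn code through the
   bdesc_* descriptor tables.  The shape of that search, reduced to
   plain C with hypothetical names:  */

struct example_builtin_desc
{
  int code;   /* builtin function code, cf. d->code */
  int icode;  /* matching insn code, cf. d->icode */
};

static int
example_lookup_icode (const struct example_builtin_desc *table,
		      unsigned int n, int fcode)
{
  unsigned int i;

  for (i = 0; i < n; i++)
    if (table[i].code == fcode)
      return table[i].icode;
  return -1;  /* no match; the caller falls through to the next table */
}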
14487 /* Store OPERAND to the memory after reload is completed. This means
14488 that we can't easily use assign_stack_local. */
14490 ix86_force_to_memory (enum machine_mode mode, rtx operand)
14493 if (!reload_completed)
14495 if (TARGET_RED_ZONE)
14497 result = gen_rtx_MEM (mode,
14498 gen_rtx_PLUS (Pmode,
14500 GEN_INT (-RED_ZONE_SIZE)));
14501 emit_move_insn (result, operand);
14503 else if (!TARGET_RED_ZONE && TARGET_64BIT)
14509 operand = gen_lowpart (DImode, operand);
14513 gen_rtx_SET (VOIDmode,
14514 gen_rtx_MEM (DImode,
14515 gen_rtx_PRE_DEC (DImode,
14516 stack_pointer_rtx)),
14522 result = gen_rtx_MEM (mode, stack_pointer_rtx);
14531 split_di (&operand, 1, operands, operands + 1);
14533 gen_rtx_SET (VOIDmode,
14534 gen_rtx_MEM (SImode,
14535 gen_rtx_PRE_DEC (Pmode,
14536 stack_pointer_rtx)),
14539 gen_rtx_SET (VOIDmode,
14540 gen_rtx_MEM (SImode,
14541 gen_rtx_PRE_DEC (Pmode,
14542 stack_pointer_rtx)),
14547 /* It is better to store HImodes as SImodes. */
14548 if (!TARGET_PARTIAL_REG_STALL)
14549 operand = gen_lowpart (SImode, operand);
14553 gen_rtx_SET (VOIDmode,
14554 gen_rtx_MEM (GET_MODE (operand),
14555 gen_rtx_PRE_DEC (SImode,
14556 stack_pointer_rtx)),
14562 result = gen_rtx_MEM (mode, stack_pointer_rtx);
14567 /* Free the operand from memory. */
14569 ix86_free_from_memory (enum machine_mode mode)
14571 if (!TARGET_RED_ZONE)
14575 if (mode == DImode || TARGET_64BIT)
14577 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
14581 /* Use LEA to deallocate stack space. In peephole2 it will be converted
14582 to a pop or add instruction if registers are available. */
14583 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
14584 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
14589 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
14590 QImode must go into class Q_REGS.
14591 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
14592 movdf to do mem-to-mem moves through integer regs. */
14594 ix86_preferred_reload_class (rtx x, enum reg_class class)
14596 if (GET_CODE (x) == CONST_VECTOR && x != CONST0_RTX (GET_MODE (x)))
14598 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
14600 /* SSE can't load any constant directly yet. */
14601 if (SSE_CLASS_P (class))
14603 /* Floats can load 0 and 1. */
14604 if (MAYBE_FLOAT_CLASS_P (class) && standard_80387_constant_p (x))
14606 /* Limit class to non-SSE. Use GENERAL_REGS if possible. */
14607 if (MAYBE_SSE_CLASS_P (class))
14608 return (reg_class_subset_p (class, GENERAL_REGS)
14609 ? GENERAL_REGS : FLOAT_REGS);
14613 /* General regs can load everything. */
14614 if (reg_class_subset_p (class, GENERAL_REGS))
14615 return GENERAL_REGS;
14616 /* In case we haven't resolved FLOAT or SSE yet, give up. */
14617 if (MAYBE_FLOAT_CLASS_P (class) || MAYBE_SSE_CLASS_P (class))
14620 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
14622 if (GET_MODE (x) == QImode && ! reg_class_subset_p (class, Q_REGS))
14627 /* If we are copying between general and FP registers, we need a memory
14628 location. The same is true for SSE and MMX registers.
14630 The macro can't work reliably when one of the CLASSES is a class containing
14631 registers from multiple units (SSE, MMX, integer). We avoid this by never
14632 combining those units in a single alternative in the machine description.
14633 Ensure that this constraint holds to avoid unexpected surprises.
14635 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
14636 enforce these sanity checks. */
14638 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
14639 enum machine_mode mode, int strict)
14641 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
14642 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
14643 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
14644 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
14645 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
14646 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
14653 return (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2)
14654 || ((SSE_CLASS_P (class1) != SSE_CLASS_P (class2)
14655 || MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
14656 && ((mode != SImode && (mode != DImode || !TARGET_64BIT))
14657 || (!TARGET_INTER_UNIT_MOVES && !optimize_size))));
14659 /* Return the cost of moving data from a register in class CLASS1 to
14660 one in class CLASS2.
14662 It is not required that the cost always equal 2 when FROM is the same as TO;
14663 on some machines it is expensive to move between registers if they are not
14664 general registers. */
14666 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
14667 enum reg_class class2)
14669 /* In case we require secondary memory, compute the cost of the store
14670 followed by the load. In order to avoid bad register allocation choices,
14671 we need for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
14673 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
14677 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
14678 MEMORY_MOVE_COST (mode, class1, 1));
14679 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
14680 MEMORY_MOVE_COST (mode, class2, 1));
14682 /* In the case of copying from a general purpose register we may emit
14683 multiple stores followed by a single load, causing a memory size
14684 mismatch stall. Count this as an arbitrarily high cost of 20. */
14685 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
14688 /* In the case of FP/MMX moves, the registers actually overlap, and we
14689 have to switch modes in order to treat them differently. */
14690 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
14691 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
14697 /* Moves between SSE/MMX and integer unit are expensive. */
14698 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
14699 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
14700 return ix86_cost->mmxsse_to_integer;
14701 if (MAYBE_FLOAT_CLASS_P (class1))
14702 return ix86_cost->fp_move;
14703 if (MAYBE_SSE_CLASS_P (class1))
14704 return ix86_cost->sse_move;
14705 if (MAYBE_MMX_CLASS_P (class1))
14706 return ix86_cost->mmx_move;
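/* Editor's illustrative sketch (hypothetical names, not backend code):
   when a secondary memory location is needed, the cost above sums the
   worst of load/store for each class, plus an arbitrary penalty of 20
   when the register counts mismatch (several stores, one load):  */

static int
example_secondary_memory_cost (int load1, int store1,
			       int load2, int store2,
			       int nregs1, int nregs2)
{
  int cost = 0;

  if (nregs1 > nregs2)
    cost += 20;		/* memory size mismatch stall */
  cost += (load1 > store1 ? load1 : store1);
  cost += (load2 > store2 ? load2 : store2);
  return cost;
}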
14710 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
14712 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
14714 /* Flags, and only flags, can hold CCmode values. */
14715 if (CC_REGNO_P (regno))
14716 return GET_MODE_CLASS (mode) == MODE_CC;
14717 if (GET_MODE_CLASS (mode) == MODE_CC
14718 || GET_MODE_CLASS (mode) == MODE_RANDOM
14719 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
14721 if (FP_REGNO_P (regno))
14722 return VALID_FP_MODE_P (mode);
14723 if (SSE_REGNO_P (regno))
14725 /* We implement the move patterns for all vector modes into and
14726 out of SSE registers, even when no operation instructions are available. */
14728 return (VALID_SSE_REG_MODE (mode)
14729 || VALID_SSE2_REG_MODE (mode)
14730 || VALID_MMX_REG_MODE (mode)
14731 || VALID_MMX_REG_MODE_3DNOW (mode));
14733 if (MMX_REGNO_P (regno))
14735 /* We implement the move patterns for 3DNOW modes even in MMX mode,
14736 so if the register is available at all, then we can move data of
14737 the given mode into or out of it. */
14738 return (VALID_MMX_REG_MODE (mode)
14739 || VALID_MMX_REG_MODE_3DNOW (mode));
14741 /* We handle both integers and floats in the general purpose registers.
14742 In the future we should be able to handle vector modes as well. */
14743 if (!VALID_INT_MODE_P (mode) && !VALID_FP_MODE_P (mode))
14745 /* Take care with QImode values - they can be in non-QI regs, but then
14746 they do cause partial register stalls. */
14747 if (regno < 4 || mode != QImode || TARGET_64BIT)
14749 return reload_in_progress || reload_completed || !TARGET_PARTIAL_REG_STALL;
14752 /* Return the cost of moving data of mode M between a
14753 register and memory. A value of 2 is the default; this cost is
14754 relative to those in `REGISTER_MOVE_COST'.
14756 If moving between registers and memory is more expensive than
14757 between two registers, you should define this macro to express the relative cost.
14760 Also model the increased moving costs of QImode registers in non Q_REGS classes. */
14764 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
14766 if (FLOAT_CLASS_P (class))
14783 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
14785 if (SSE_CLASS_P (class))
14788 switch (GET_MODE_SIZE (mode))
14802 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
14804 if (MMX_CLASS_P (class))
14807 switch (GET_MODE_SIZE (mode))
14818 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
14820 switch (GET_MODE_SIZE (mode))
14824 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
14825 : ix86_cost->movzbl_load);
14827 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
14828 : ix86_cost->int_store[0] + 4);
14831 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
14833 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
14834 if (mode == TFmode)
14836 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
14837 * (((int) GET_MODE_SIZE (mode)
14838 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
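/* Editor's illustrative note: the multi-word fallback above counts the
   word-sized moves needed for a MODE_SIZE-byte value with a ceiling
   division, i.e.:  */

static int
example_move_count (int mode_size, int units_per_word)
{
  return (mode_size + units_per_word - 1) / units_per_word;
}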
14842 /* Compute a (partial) cost for rtx X. Return true if the complete
14843 cost has been computed, and false if subexpressions should be
14844 scanned. In either case, *TOTAL contains the cost result. */
14847 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
14849 enum machine_mode mode = GET_MODE (x);
14857 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
14859 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
14861 else if (flag_pic && SYMBOLIC_CONST (x)
14863 || (GET_CODE (x) != LABEL_REF
14864 && (GET_CODE (x) != SYMBOL_REF
14865 || !SYMBOL_REF_LOCAL_P (x)))))
14872 if (mode == VOIDmode)
14875 switch (standard_80387_constant_p (x))
14880 default: /* Other constants */
14885 /* Start with (MEM (SYMBOL_REF)), since that's where
14886 it'll probably end up. Add a penalty for size. */
14887 *total = (COSTS_N_INSNS (1)
14888 + (flag_pic != 0 && !TARGET_64BIT)
14889 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
14895 /* The zero extension is often completely free on x86_64, so make
14896 it as cheap as possible. */
14897 if (TARGET_64BIT && mode == DImode
14898 && GET_MODE (XEXP (x, 0)) == SImode)
14900 else if (TARGET_ZERO_EXTEND_WITH_AND)
14901 *total = COSTS_N_INSNS (ix86_cost->add);
14903 *total = COSTS_N_INSNS (ix86_cost->movzx);
14907 *total = COSTS_N_INSNS (ix86_cost->movsx);
14911 if (GET_CODE (XEXP (x, 1)) == CONST_INT
14912 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
14914 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
14917 *total = COSTS_N_INSNS (ix86_cost->add);
14920 if ((value == 2 || value == 3)
14921 && ix86_cost->lea <= ix86_cost->shift_const)
14923 *total = COSTS_N_INSNS (ix86_cost->lea);
14933 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
14935 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14937 if (INTVAL (XEXP (x, 1)) > 32)
14938 *total = COSTS_N_INSNS (ix86_cost->shift_const + 2);
14940 *total = COSTS_N_INSNS (ix86_cost->shift_const * 2);
14944 if (GET_CODE (XEXP (x, 1)) == AND)
14945 *total = COSTS_N_INSNS (ix86_cost->shift_var * 2);
14947 *total = COSTS_N_INSNS (ix86_cost->shift_var * 6 + 2);
14952 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14953 *total = COSTS_N_INSNS (ix86_cost->shift_const);
14955 *total = COSTS_N_INSNS (ix86_cost->shift_var);
14960 if (FLOAT_MODE_P (mode))
14962 *total = COSTS_N_INSNS (ix86_cost->fmul);
14967 rtx op0 = XEXP (x, 0);
14968 rtx op1 = XEXP (x, 1);
14970 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14972 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
14973 for (nbits = 0; value != 0; value &= value - 1)
14977 /* This is arbitrary. */
14980 /* Compute costs correctly for widening multiplication. */
14981 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
14982 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
14983 == GET_MODE_SIZE (mode))
14985 int is_mulwiden = 0;
14986 enum machine_mode inner_mode = GET_MODE (op0);
14988 if (GET_CODE (op0) == GET_CODE (op1))
14989 is_mulwiden = 1, op1 = XEXP (op1, 0);
14990 else if (GET_CODE (op1) == CONST_INT)
14992 if (GET_CODE (op0) == SIGN_EXTEND)
14993 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
14996 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
15000 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
15003 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
15004 + nbits * ix86_cost->mult_bit)
15005 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
15014 if (FLOAT_MODE_P (mode))
15015 *total = COSTS_N_INSNS (ix86_cost->fdiv);
15017 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
15021 if (FLOAT_MODE_P (mode))
15022 *total = COSTS_N_INSNS (ix86_cost->fadd);
15023 else if (GET_MODE_CLASS (mode) == MODE_INT
15024 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
15026 if (GET_CODE (XEXP (x, 0)) == PLUS
15027 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
15028 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
15029 && CONSTANT_P (XEXP (x, 1)))
15031 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
15032 if (val == 2 || val == 4 || val == 8)
15034 *total = COSTS_N_INSNS (ix86_cost->lea);
15035 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15036 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
15038 *total += rtx_cost (XEXP (x, 1), outer_code);
15042 else if (GET_CODE (XEXP (x, 0)) == MULT
15043 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
15045 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
15046 if (val == 2 || val == 4 || val == 8)
15048 *total = COSTS_N_INSNS (ix86_cost->lea);
15049 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15050 *total += rtx_cost (XEXP (x, 1), outer_code);
15054 else if (GET_CODE (XEXP (x, 0)) == PLUS)
15056 *total = COSTS_N_INSNS (ix86_cost->lea);
15057 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15058 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15059 *total += rtx_cost (XEXP (x, 1), outer_code);
15066 if (FLOAT_MODE_P (mode))
15068 *total = COSTS_N_INSNS (ix86_cost->fadd);
15076 if (!TARGET_64BIT && mode == DImode)
15078 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
15079 + (rtx_cost (XEXP (x, 0), outer_code)
15080 << (GET_MODE (XEXP (x, 0)) != DImode))
15081 + (rtx_cost (XEXP (x, 1), outer_code)
15082 << (GET_MODE (XEXP (x, 1)) != DImode)));
15088 if (FLOAT_MODE_P (mode))
15090 *total = COSTS_N_INSNS (ix86_cost->fchs);
15096 if (!TARGET_64BIT && mode == DImode)
15097 *total = COSTS_N_INSNS (ix86_cost->add * 2);
15099 *total = COSTS_N_INSNS (ix86_cost->add);
15103 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
15104 && XEXP (XEXP (x, 0), 1) == const1_rtx
15105 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
15106 && XEXP (x, 1) == const0_rtx)
15108 /* This kind of construct is implemented using test[bwl].
15109 Treat it as if we had an AND. */
15110 *total = (COSTS_N_INSNS (ix86_cost->add)
15111 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
15112 + rtx_cost (const1_rtx, outer_code));
15118 if (!TARGET_SSE_MATH
15120 || (mode == DFmode && !TARGET_SSE2))
15125 if (FLOAT_MODE_P (mode))
15126 *total = COSTS_N_INSNS (ix86_cost->fabs);
15130 if (FLOAT_MODE_P (mode))
15131 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
15135 if (XINT (x, 1) == UNSPEC_TP)
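/* Editor's illustrative sketch: the MULT cost above counts the set bits
   of a constant multiplier with the classic v &= v - 1 loop, where each
   iteration clears the lowest set bit:  */

static int
example_popcount (unsigned long long v)
{
  int nbits = 0;

  for (; v != 0; v &= v - 1)
    nbits++;
  return nbits;
}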
15146 static int current_machopic_label_num;
15148 /* Given a symbol name and its associated stub, write out the
15149 definition of the stub. */
15152 machopic_output_stub (FILE *file, const char *symb, const char *stub)
15154 unsigned int length;
15155 char *binder_name, *symbol_name, lazy_ptr_name[32];
15156 int label = ++current_machopic_label_num;
15158 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
15159 symb = (*targetm.strip_name_encoding) (symb);
15161 length = strlen (stub);
15162 binder_name = alloca (length + 32);
15163 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
15165 length = strlen (symb);
15166 symbol_name = alloca (length + 32);
15167 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
15169 sprintf (lazy_ptr_name, "L%d$lz", label);
15172 machopic_picsymbol_stub_section ();
15174 machopic_symbol_stub_section ();
15176 fprintf (file, "%s:\n", stub);
15177 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
15181 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
15182 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
15183 fprintf (file, "\tjmp %%edx\n");
15186 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
15188 fprintf (file, "%s:\n", binder_name);
15192 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
15193 fprintf (file, "\tpushl %%eax\n");
15196 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
15198 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
15200 machopic_lazy_symbol_ptr_section ();
15201 fprintf (file, "%s:\n", lazy_ptr_name);
15202 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
15203 fprintf (file, "\t.long %s\n", binder_name);
15205 #endif /* TARGET_MACHO */
15207 /* Order the registers for the register allocator. */
15210 x86_order_regs_for_local_alloc (void)
15215 /* First allocate the local general purpose registers. */
15216 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
15217 if (GENERAL_REGNO_P (i) && call_used_regs[i])
15218 reg_alloc_order [pos++] = i;
15220 /* Global general purpose registers. */
15221 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
15222 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
15223 reg_alloc_order [pos++] = i;
15225 /* x87 registers come first in case we are doing FP math using them. */
15227 if (!TARGET_SSE_MATH)
15228 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
15229 reg_alloc_order [pos++] = i;
15231 /* SSE registers. */
15232 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
15233 reg_alloc_order [pos++] = i;
15234 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
15235 reg_alloc_order [pos++] = i;
15237 /* x87 registers. */
15238 if (TARGET_SSE_MATH)
15239 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
15240 reg_alloc_order [pos++] = i;
15242 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
15243 reg_alloc_order [pos++] = i;
15245 /* Initialize the rest of the array as we do not allocate some registers at all. */
15247 while (pos < FIRST_PSEUDO_REGISTER)
15248 reg_alloc_order [pos++] = 0;
15251 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
15252 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
15255 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
15256 struct attribute_spec.handler. */
15258 ix86_handle_struct_attribute (tree *node, tree name,
15259 tree args ATTRIBUTE_UNUSED,
15260 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
15263 if (DECL_P (*node))
15265 if (TREE_CODE (*node) == TYPE_DECL)
15266 type = &TREE_TYPE (*node);
15271 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
15272 || TREE_CODE (*type) == UNION_TYPE)))
15274 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
15275 *no_add_attrs = true;
15278 else if ((is_attribute_p ("ms_struct", name)
15279 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
15280 || ((is_attribute_p ("gcc_struct", name)
15281 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
15283 warning ("%qs incompatible attribute ignored",
15284 IDENTIFIER_POINTER (name));
15285 *no_add_attrs = true;
15292 ix86_ms_bitfield_layout_p (tree record_type)
15294 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
15295 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
15296 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
15299 /* Returns an expression indicating where the this parameter is
15300 located on entry to the FUNCTION. */
15303 x86_this_parameter (tree function)
15305 tree type = TREE_TYPE (function);
15309 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
15310 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
15313 if (ix86_function_regparm (type, function) > 0)
15317 parm = TYPE_ARG_TYPES (type);
15318 /* Figure out whether or not the function has a variable number of arguments. */
15320 for (; parm; parm = TREE_CHAIN (parm))
15321 if (TREE_VALUE (parm) == void_type_node)
15323 /* If not, the this parameter is in the first argument. */
15327 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
15329 return gen_rtx_REG (SImode, regno);
15333 if (aggregate_value_p (TREE_TYPE (type), type))
15334 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
15336 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
15339 /* Determine whether x86_output_mi_thunk can succeed. */
15342 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
15343 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
15344 HOST_WIDE_INT vcall_offset, tree function)
15346 /* 64-bit can handle anything. */
15350 /* For 32-bit, everything's fine if we have one free register. */
15351 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
15354 /* Need a free register for vcall_offset. */
15358 /* Need a free register for GOT references. */
15359 if (flag_pic && !(*targetm.binds_local_p) (function))
15362 /* Otherwise ok. */
15366 /* Output the assembler code for a thunk function. THUNK_DECL is the
15367 declaration for the thunk function itself, FUNCTION is the decl for
15368 the target function. DELTA is an immediate constant offset to be
15369 added to THIS. If VCALL_OFFSET is nonzero, the word at
15370 *(*this + vcall_offset) should be added to THIS. */
15373 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
15374 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
15375 HOST_WIDE_INT vcall_offset, tree function)
15378 rtx this = x86_this_parameter (function);
15381 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
15382 pull it in now and let DELTA benefit. */
15385 else if (vcall_offset)
15387 /* Put the this parameter into %eax. */
15389 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
15390 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15393 this_reg = NULL_RTX;
15395 /* Adjust the this parameter by a fixed constant. */
15398 xops[0] = GEN_INT (delta);
15399 xops[1] = this_reg ? this_reg : this;
15402 if (!x86_64_general_operand (xops[0], DImode))
15404 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
15406 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
15410 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
15413 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
15416 /* Adjust the this parameter by a value stored in the vtable. */
15420 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
15423 int tmp_regno = 2 /* ECX */;
15424 if (lookup_attribute ("fastcall",
15425 TYPE_ATTRIBUTES (TREE_TYPE (function))))
15426 tmp_regno = 0 /* EAX */;
15427 tmp = gen_rtx_REG (SImode, tmp_regno);
15430 xops[0] = gen_rtx_MEM (Pmode, this_reg);
15433 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
15435 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15437 /* Adjust the this parameter. */
15438 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
15439 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
15441 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
15442 xops[0] = GEN_INT (vcall_offset);
15444 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
15445 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
15447 xops[1] = this_reg;
15449 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
15451 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
15454 /* If necessary, drop THIS back to its stack slot. */
15455 if (this_reg && this_reg != this)
15457 xops[0] = this_reg;
15459 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15462 xops[0] = XEXP (DECL_RTL (function), 0);
15465 if (!flag_pic || (*targetm.binds_local_p) (function))
15466 output_asm_insn ("jmp\t%P0", xops);
15469 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
15470 tmp = gen_rtx_CONST (Pmode, tmp);
15471 tmp = gen_rtx_MEM (QImode, tmp);
15473 output_asm_insn ("jmp\t%A0", xops);
15478 if (!flag_pic || (*targetm.binds_local_p) (function))
15479 output_asm_insn ("jmp\t%P0", xops);
15484 rtx sym_ref = XEXP (DECL_RTL (function), 0);
15485 tmp = (gen_rtx_SYMBOL_REF
15487 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
15488 tmp = gen_rtx_MEM (QImode, tmp);
15490 output_asm_insn ("jmp\t%0", xops);
15493 #endif /* TARGET_MACHO */
15495 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
15496 output_set_got (tmp);
15499 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
15500 output_asm_insn ("jmp\t{*}%1", xops);
15506 x86_file_start (void)
15508 default_file_start ();
15509 if (X86_FILE_START_VERSION_DIRECTIVE)
15510 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
15511 if (X86_FILE_START_FLTUSED)
15512 fputs ("\t.global\t__fltused\n", asm_out_file);
15513 if (ix86_asm_dialect == ASM_INTEL)
15514 fputs ("\t.intel_syntax\n", asm_out_file);
15518 x86_field_alignment (tree field, int computed)
15520 enum machine_mode mode;
15521 tree type = TREE_TYPE (field);
15523 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
15525 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
15526 ? get_inner_array_type (type) : type);
15527 if (mode == DFmode || mode == DCmode
15528 || GET_MODE_CLASS (mode) == MODE_INT
15529 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
15530 return MIN (32, computed);
15534 /* Output assembler code to FILE to increment profiler label # LABELNO
15535 for profiling a function entry. */
15537 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
15542 #ifndef NO_PROFILE_COUNTERS
15543 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
15545 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
15549 #ifndef NO_PROFILE_COUNTERS
15550 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
15552 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
15556 #ifndef NO_PROFILE_COUNTERS
15557 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
15558 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
15560 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
15564 #ifndef NO_PROFILE_COUNTERS
15565 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
15566 PROFILE_COUNT_REGISTER);
15568 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
15572 /* We don't have exact information about the insn sizes, but we may assume
15573 quite safely that we are informed about all 1 byte insns and memory
15574 address sizes. This is enough to eliminate unnecessary padding in 99% of cases. */
15578 min_insn_size (rtx insn)
15582 if (!INSN_P (insn) || !active_insn_p (insn))
15585 /* Discard alignments we've emitted and jump instructions. */
15586 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
15587 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
15589 if (GET_CODE (insn) == JUMP_INSN
15590 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
15591 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
15594 /* Important case - calls are always 5 bytes.
15595 It is common to have many calls in a row. */
15596 if (GET_CODE (insn) == CALL_INSN
15597 && symbolic_reference_mentioned_p (PATTERN (insn))
15598 && !SIBLING_CALL_P (insn))
15600 if (get_attr_length (insn) <= 1)
15603 /* For normal instructions we may rely on the sizes of addresses
15604 and the presence of a symbol to require 4 bytes of encoding.
15605 This is not the case for jumps where references are PC relative. */
15606 if (GET_CODE (insn) != JUMP_INSN)
15608 l = get_attr_length_address (insn);
15609 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
15618 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16 byte window. */
15622 ix86_avoid_jump_misspredicts (void)
15624 rtx insn, start = get_insns ();
15625 int nbytes = 0, njumps = 0;
15628 /* Look for all minimal intervals of instructions containing 4 jumps.
15629 The intervals are bounded by START and INSN. NBYTES is the total
15630 size of instructions in the interval including INSN and not including
15631 START. When NBYTES is smaller than 16 bytes, it is possible
15632 that the ends of START and INSN land in the same 16-byte page.
15634 The smallest in-page offset at which INSN can start is the case where
15635 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
15636 We add a p2align to the 16-byte window with maxskip 17 - NBYTES + sizeof (INSN).
15638 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
15641 nbytes += min_insn_size (insn);
15643 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
15644 INSN_UID (insn), min_insn_size (insn));
15645 if ((GET_CODE (insn) == JUMP_INSN
15646 && GET_CODE (PATTERN (insn)) != ADDR_VEC
15647 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
15648 || GET_CODE (insn) == CALL_INSN)
15655 start = NEXT_INSN (start);
15656 if ((GET_CODE (start) == JUMP_INSN
15657 && GET_CODE (PATTERN (start)) != ADDR_VEC
15658 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
15659 || GET_CODE (start) == CALL_INSN)
15660 njumps--, isjump = 1;
15663 nbytes -= min_insn_size (start);
15668 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
15669 INSN_UID (start), INSN_UID (insn), nbytes);
15671 if (njumps == 3 && isjump && nbytes < 16)
15673 int padsize = 15 - nbytes + min_insn_size (insn);
15676 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
15677 INSN_UID (insn), padsize);
15678 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
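/* Editor's illustrative sketch (hypothetical types, not backend code):
   the sliding window above, reduced to an array of (size, is_jump)
   records.  Returns how many padding sites the pass would emit.  */

struct example_insn { int size; int is_jump; };

static int
example_count_pad_sites (const struct example_insn *w, int n)
{
  int start = 0, bytes = 0, jumps = 0, pads = 0, dropped_jump = 0, i;

  for (i = 0; i < n; i++)
    {
      bytes += w[i].size;
      if (w[i].is_jump)
	jumps++;
      /* Shrink the window from the front until at most 3 jumps remain,
	 remembering whether the last dropped insn was a jump.  */
      while (jumps > 3)
	{
	  dropped_jump = w[start].is_jump;
	  if (dropped_jump)
	    jumps--;
	  bytes -= w[start].size;
	  start++;
	}
      /* Four jumps that could share one 16-byte page: pad here.  */
      if (jumps == 3 && dropped_jump && bytes < 16)
	pads++;
    }
  return pads;
}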
15683 /* AMD Athlon works faster
15684 when RET is not the destination of a conditional jump or directly preceded
15685 by another jump instruction. We avoid the penalty by inserting a NOP just
15686 before such RET instructions. */
15688 ix86_pad_returns (void)
15693 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
15695 basic_block bb = e->src;
15696 rtx ret = BB_END (bb);
15698 bool replace = false;
15700 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
15701 || !maybe_hot_bb_p (bb))
15703 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
15704 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
15706 if (prev && GET_CODE (prev) == CODE_LABEL)
15711 FOR_EACH_EDGE (e, ei, bb->preds)
15712 if (EDGE_FREQUENCY (e) && e->src->index >= 0
15713 && !(e->flags & EDGE_FALLTHRU))
15718 prev = prev_active_insn (ret);
15720 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
15721 || GET_CODE (prev) == CALL_INSN))
15723 /* Empty functions get a branch mispredict even when the jump destination
15724 is not visible to us. */
15725 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
15730 emit_insn_before (gen_return_internal_long (), ret);
15736 /* Implement machine specific optimizations. We implement padding of returns
15737 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
15741 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
15742 ix86_pad_returns ();
15743 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
15744 ix86_avoid_jump_misspredicts ();
15747 /* Return nonzero when a QImode register that must be represented via a REX prefix is used. */
15750 x86_extended_QIreg_mentioned_p (rtx insn)
15753 extract_insn_cached (insn);
15754 for (i = 0; i < recog_data.n_operands; i++)
15755 if (REG_P (recog_data.operand[i])
15756 && REGNO (recog_data.operand[i]) >= 4)
15761 /* Return nonzero when P points to a register encoded via a REX prefix.
15762 Called via for_each_rtx. */
15764 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
15766 unsigned int regno;
15769 regno = REGNO (*p);
15770 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
15773 /* Return true when INSN mentions a register that must be encoded using a REX prefix. */
15776 x86_extended_reg_mentioned_p (rtx insn)
15778 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
15781 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
15782 optabs would emit if we didn't have TFmode patterns. */
15785 x86_emit_floatuns (rtx operands[2])
15787 rtx neglab, donelab, i0, i1, f0, in, out;
15788 enum machine_mode mode, inmode;
15790 inmode = GET_MODE (operands[1]);
15791 if (inmode != SImode
15792 && inmode != DImode)
15796 in = force_reg (inmode, operands[1]);
15797 mode = GET_MODE (out);
15798 neglab = gen_label_rtx ();
15799 donelab = gen_label_rtx ();
15800 i1 = gen_reg_rtx (Pmode);
15801 f0 = gen_reg_rtx (mode);
15803 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
15805 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
15806 emit_jump_insn (gen_jump (donelab));
15809 emit_label (neglab);
15811 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15812 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15813 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
15814 expand_float (f0, i0, 0);
15815 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
15817 emit_label (donelab);
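/* Editor's illustrative sketch (not part of this file): the same
   unsigned-to-FP trick in plain C for a 64-bit input.  When the sign
   bit is set, halve the value while keeping the low bit (so the final
   rounding is unaffected), convert as signed, and double the result
   -- the (in >> 1) | (in & 1) and f0 + f0 steps above.  */

static double
example_floatuns (unsigned long long u)
{
  unsigned long long half;

  if ((long long) u >= 0)
    return (double) (long long) u;	/* fits the signed range */

  half = (u >> 1) | (u & 1);
  return (double) (long long) half * 2.0;
}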
15820 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15821 with all elements equal to VAR. Return true if successful. */
15824 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
15825 rtx target, rtx val)
15827 enum machine_mode smode, wsmode, wvmode;
15834 if (!mmx_ok && !TARGET_SSE)
15842 val = force_reg (GET_MODE_INNER (mode), val);
15843 x = gen_rtx_VEC_DUPLICATE (mode, val);
15844 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15850 val = gen_lowpart (SImode, val);
15851 x = gen_rtx_TRUNCATE (HImode, val);
15852 x = gen_rtx_VEC_DUPLICATE (mode, x);
15853 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15874 /* Replicate the value once into the next wider mode and recurse. */
15875 val = convert_modes (wsmode, smode, val, true);
15876 x = expand_simple_binop (wsmode, ASHIFT, val,
15877 GEN_INT (GET_MODE_BITSIZE (smode)),
15878 NULL_RTX, 1, OPTAB_LIB_WIDEN);
15879 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
15881 x = gen_reg_rtx (wvmode);
15882 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
15883 gcc_unreachable ();
15884 emit_move_insn (target, gen_lowpart (mode, x));
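/* Editor's illustrative sketch: the widen-and-recurse replication step
   above, on a plain integer.  Each step doubles the element width with
   a shift-and-IOR until the word is full of copies of the element.  */

static unsigned long long
example_replicate (unsigned long long val, int elt_bits)
{
  while (elt_bits < 64)
    {
      val |= val << elt_bits;
      elt_bits *= 2;
    }
  return val;
}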
15892 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15893 whose low element is VAR, and other elements are zero. Return true if successful. */
15897 ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode,
15898 rtx target, rtx var)
15900 enum machine_mode vsimode;
15907 if (!mmx_ok && !TARGET_SSE)
15913 var = force_reg (GET_MODE_INNER (mode), var);
15914 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
15915 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15920 var = force_reg (GET_MODE_INNER (mode), var);
15921 x = gen_rtx_VEC_DUPLICATE (mode, var);
15922 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
15923 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15928 vsimode = V4SImode;
15934 vsimode = V2SImode;
15937 /* Zero extend the variable element to SImode and recurse. */
15938 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
15940 x = gen_reg_rtx (vsimode);
15941 if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var))
15942 gcc_unreachable ();
15944 emit_move_insn (target, gen_lowpart (mode, x));
15952 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15953 consisting of the values in VALS. It is known that all elements
15954 except ONE_VAR are constants. Return true if successful. */
15957 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
15958 rtx target, rtx vals, int one_var)
15960 rtx var = XVECEXP (vals, 0, one_var);
15961 enum machine_mode wmode;
15964 XVECEXP (vals, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
15965 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
15973 /* For the two element vectors, it's just as easy to use
15974 the general case. */
15990 /* There's no way to set one QImode entry easily. Combine
15991 the variable value with its adjacent constant value, and
15992 promote to an HImode set. */
15993 x = XVECEXP (vals, 0, one_var ^ 1);
15996 var = convert_modes (HImode, QImode, var, true);
15997 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
15998 NULL_RTX, 1, OPTAB_LIB_WIDEN);
15999 x = GEN_INT (INTVAL (x) & 0xff);
16003 var = convert_modes (HImode, QImode, var, true);
16004 x = gen_int_mode (INTVAL (x) << 8, HImode);
16006 if (x != const0_rtx)
16007 var = expand_simple_binop (HImode, IOR, var, x, var,
16008 1, OPTAB_LIB_WIDEN);
16010 x = gen_reg_rtx (wmode);
16011 emit_move_insn (x, gen_lowpart (wmode, const_vec));
16012 ix86_expand_vector_set (mmx_ok, target, var, one_var >> 1);
16014 emit_move_insn (target, gen_lowpart (mode, x));
16021 emit_move_insn (target, const_vec);
16022 ix86_expand_vector_set (mmx_ok, target, var, one_var);
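/* Editor's illustrative sketch: the QImode case above merges the
   variable byte with its adjacent constant byte into one HImode value
   before storing; one_var's low bit decides which half it occupies.
   In plain C:  */

static unsigned short
example_pair_bytes (unsigned char var, unsigned char cst, int var_is_high)
{
  if (var_is_high)
    return (unsigned short) ((var << 8) | cst);
  return (unsigned short) ((cst << 8) | var);
}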
16026 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
16027 all values variable, and none identical. */
16030 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
16031 rtx target, rtx vals)
16033 enum machine_mode half_mode = GET_MODE_INNER (mode);
16034 rtx op0 = NULL, op1 = NULL;
16035 bool use_vec_concat = false;
16041 if (!mmx_ok && !TARGET_SSE)
16047 /* For the two element vectors, we always implement VEC_CONCAT. */
16048 op0 = XVECEXP (vals, 0, 0);
16049 op1 = XVECEXP (vals, 0, 1);
16050 use_vec_concat = true;
16054 half_mode = V2SFmode;
16057 half_mode = V2SImode;
16063 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
16064 Recurse to load the two halves. */
16066 op0 = gen_reg_rtx (half_mode);
16067 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
16068 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
16070 op1 = gen_reg_rtx (half_mode);
16071 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
16072 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
16074 use_vec_concat = true;
16085 gcc_unreachable ();
16088 if (use_vec_concat)
16090 if (!register_operand (op0, half_mode))
16091 op0 = force_reg (half_mode, op0);
16092 if (!register_operand (op1, half_mode))
16093 op1 = force_reg (half_mode, op1);
16095 emit_insn (gen_rtx_SET (VOIDmode, target,
16096 gen_rtx_VEC_CONCAT (mode, op0, op1)));
16100 int i, j, n_elts, n_words, n_elt_per_word;
16101 enum machine_mode inner_mode;
16102 rtx words[4], shift;
16104 inner_mode = GET_MODE_INNER (mode);
16105 n_elts = GET_MODE_NUNITS (mode);
16106 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
16107 n_elt_per_word = n_elts / n_words;
16108 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
16110 for (i = 0; i < n_words; ++i)
16112 rtx word = NULL_RTX;
16114 for (j = 0; j < n_elt_per_word; ++j)
16116 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
16117 elt = convert_modes (word_mode, inner_mode, elt, true);
16123 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
16124 word, 1, OPTAB_LIB_WIDEN);
16125 word = expand_simple_binop (word_mode, IOR, word, elt,
16126 word, 1, OPTAB_LIB_WIDEN);
16134 emit_move_insn (target, gen_lowpart (mode, words[0]));
16135 else if (n_words == 2)
16137 rtx tmp = gen_reg_rtx (mode);
16138 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
16139 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
16140 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
16141 emit_move_insn (target, tmp);
16143 else if (n_words == 4)
16145 rtx tmp = gen_reg_rtx (V4SImode);
16146 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
16147 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
16148 emit_move_insn (target, gen_lowpart (mode, tmp));
16151 gcc_unreachable ();
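/* Editor's illustrative sketch: the shift-and-IOR loop above packs one
   word's elements starting from the highest-indexed one, so element 0
   lands in the low bits (little-endian layout).  Shown here for byte
   elements:  */

static unsigned int
example_pack_word (const unsigned char *elts, int n_elt_per_word)
{
  unsigned int word = 0;
  int j;

  for (j = n_elt_per_word - 1; j >= 0; j--)
    word = (word << 8) | elts[j];
  return word;
}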
16155 /* Initialize vector TARGET via VALS. Suppress the use of MMX
16156 instructions unless MMX_OK is true. */
16159 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
16161 enum machine_mode mode = GET_MODE (target);
16162 enum machine_mode inner_mode = GET_MODE_INNER (mode);
16163 int n_elts = GET_MODE_NUNITS (mode);
16164 int n_var = 0, one_var = -1;
16165 bool all_same = true, all_const_zero = true;
16169 for (i = 0; i < n_elts; ++i)
16171 x = XVECEXP (vals, 0, i);
16172 if (!CONSTANT_P (x))
16173 n_var++, one_var = i;
16174 else if (x != CONST0_RTX (inner_mode))
16175 all_const_zero = false;
16176 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
16180 /* Constants are best loaded from the constant pool. */
16183 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
16187 /* If all values are identical, broadcast the value. */
16189 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
16190 XVECEXP (vals, 0, 0)))
16193 /* Values where only one field is non-constant are best loaded from
16194 the pool and overwritten via move later. */
16197 if (all_const_zero && one_var == 0
16198 && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
16199 XVECEXP (vals, 0, 0)))
16202 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
16206 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
16210 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
16212 enum machine_mode mode = GET_MODE (target);
16213 enum machine_mode inner_mode = GET_MODE_INNER (mode);
16214 bool use_vec_merge = false;
16230 /* For the two element vectors, we implement a VEC_CONCAT with
16231 the extraction of the other element. */
16233 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
16234 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
16237 op0 = val, op1 = tmp;
16239 op0 = tmp, op1 = val;
16241 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
16242 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
16250 use_vec_merge = true;
16254 /* tmp = op0 = A B C D */
16255 tmp = copy_to_reg (target);
16257 /* op0 = C C D D */
16258 emit_insn (gen_sse_unpcklps (target, target, target));
16260 /* op0 = C C D X */
16261 ix86_expand_vector_set (false, target, val, 0);
16263 /* op0 = A B X D */
16264 emit_insn (gen_sse_shufps_1 (target, target, tmp,
16265 GEN_INT (1), GEN_INT (0),
16266 GEN_INT (2), GEN_INT (3)));
16270 tmp = copy_to_reg (target);
16271 ix86_expand_vector_set (false, target, val, 0);
16272 emit_insn (gen_sse_shufps_1 (target, target, tmp,
16273 GEN_INT (0), GEN_INT (1),
16274 GEN_INT (0), GEN_INT (3)));
16278 tmp = copy_to_reg (target);
16279 ix86_expand_vector_set (false, target, val, 0);
16280 emit_insn (gen_sse_shufps_1 (target, target, tmp,
16281 GEN_INT (0), GEN_INT (1),
16282 GEN_INT (2), GEN_INT (0)));
16286 gcc_unreachable ();
16291 /* Element 0 handled by vec_merge below. */
16294 use_vec_merge = true;
16300 /* With SSE2, use integer shuffles to swap element 0 and ELT,
16301 store into element 0, then shuffle them back. */
16305 order[0] = GEN_INT (elt);
16306 order[1] = const1_rtx;
16307 order[2] = const2_rtx;
16308 order[3] = GEN_INT (3);
16309 order[elt] = const0_rtx;
16311 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
16312 order[1], order[2], order[3]));
16314 ix86_expand_vector_set (false, target, val, 0);
16316 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
16317 order[1], order[2], order[3]));
16321 /* For SSE1, we have to reuse the V4SF code. */
16322 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
16323 gen_lowpart (SFmode, val), elt);
16328 use_vec_merge = TARGET_SSE2;
16331 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
16342 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
16343 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
16344 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
16348 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
16350 emit_move_insn (mem, target);
16352 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
16353 emit_move_insn (tmp, val);
16355 emit_move_insn (target, mem);
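/* Editor's illustrative sketch (hypothetical, plain C): the memory
   fallback above spills the vector to a stack temporary, stores the
   one element, and reloads the whole vector -- shown for a V4SF
   value:  */

static void
example_set_elt (float *vec4, int elt, float val)
{
  float tmp[4];
  int i;

  for (i = 0; i < 4; i++)	/* emit_move_insn (mem, target) */
    tmp[i] = vec4[i];
  tmp[elt] = val;		/* store the one element */
  for (i = 0; i < 4; i++)	/* emit_move_insn (target, mem) */
    vec4[i] = tmp[i];
}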
16360 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
16362 enum machine_mode mode = GET_MODE (vec);
16363 enum machine_mode inner_mode = GET_MODE_INNER (mode);
16364 bool use_vec_extr = false;
16377 use_vec_extr = true;
16389 tmp = gen_reg_rtx (mode);
16390 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
16391 GEN_INT (elt), GEN_INT (elt),
16392 GEN_INT (elt), GEN_INT (elt)));
16396 tmp = gen_reg_rtx (mode);
16397 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
16401 gcc_unreachable ();
16404 use_vec_extr = true;
16418 tmp = gen_reg_rtx (mode);
16419 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
16420 GEN_INT (elt), GEN_INT (elt),
16421 GEN_INT (elt), GEN_INT (elt)));
16425 tmp = gen_reg_rtx (mode);
16426 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
16430 gcc_unreachable ();
16433 use_vec_extr = true;
16437 /* For SSE1, we have to reuse the V4SF code. */
16438 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
16439 gen_lowpart (V4SFmode, vec), elt);
16445 use_vec_extr = TARGET_SSE2;
16448 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
16453 /* ??? Could extract the appropriate HImode element and shift. */
16460 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
16461 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
16463 /* Let the rtl optimizers know about the zero extension performed. */
16464 if (inner_mode == HImode)
16466 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
16467 target = gen_lowpart (SImode, target);
16470 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
16474 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
16476 emit_move_insn (mem, vec);
16478 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
16479 emit_move_insn (target, tmp);
16483 /* Implements target hook vector_mode_supported_p. */
16485 ix86_vector_mode_supported_p (enum machine_mode mode)
16487 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
16489 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
16491 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
16493 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
16498 /* Worker function for TARGET_MD_ASM_CLOBBERS.
16500 We do this in the new i386 backend to maintain source compatibility
16501 with the old cc0-based compiler. */
16504 ix86_md_asm_clobbers (tree clobbers)
16506 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
16508 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
16510 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
16515 /* Worker function for REVERSE_CONDITION. */
16518 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
16520 return (mode != CCFPmode && mode != CCFPUmode
16521 ? reverse_condition (code)
16522 : reverse_condition_maybe_unordered (code));
16525 /* Output code to perform an x87 FP register move, from OPERANDS[1] to OPERANDS[0]. */
16529 output_387_reg_move (rtx insn, rtx *operands)
16531 if (REG_P (operands[1])
16532 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
16534 if (REGNO (operands[0]) == FIRST_STACK_REG
16535 && TARGET_USE_FFREEP)
16536 return "ffreep\t%y0";
16537 return "fstp\t%y0";
16539 if (STACK_TOP_P (operands[0]))
16540 return "fld%z1\t%y1";
16544 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
16545 the FP status register is set. */
16548 ix86_emit_fp_unordered_jump (rtx label)
16550 rtx reg = gen_reg_rtx (HImode);
16553 emit_insn (gen_x86_fnstsw_1 (reg));
16555 if (TARGET_USE_SAHF)
16557 emit_insn (gen_x86_sahf_1 (reg));
16559 temp = gen_rtx_REG (CCmode, FLAGS_REG);
16560 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
16564 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
16566 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
16567 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
16570 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
16571 gen_rtx_LABEL_REF (VOIDmode, label),
16573 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
16574 emit_jump_insn (temp);
16577 /* Output code to perform a log1p XFmode calculation. */
16579 void ix86_emit_i387_log1p (rtx op0, rtx op1)
16581 rtx label1 = gen_label_rtx ();
16582 rtx label2 = gen_label_rtx ();
16584 rtx tmp = gen_reg_rtx (XFmode);
16585 rtx tmp2 = gen_reg_rtx (XFmode);
16587 emit_insn (gen_absxf2 (tmp, op1));
16588 emit_insn (gen_cmpxf (tmp,
16589 CONST_DOUBLE_FROM_REAL_VALUE (
16590 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
16592 emit_jump_insn (gen_bge (label1));
16594 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
16595 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
16596 emit_jump (label2);
16598 emit_label (label1);
16599 emit_move_insn (tmp, CONST1_RTX (XFmode));
16600 emit_insn (gen_addxf3 (tmp, op1, tmp));
16601 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
16602 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
16604 emit_label (label2);
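/* Editor's illustrative sketch (not part of this file; assumes <math.h>):
   the branch above in plain C.  The threshold is 1 - sqrt(2)/2; below
   it, forming 1 + x explicitly would lose low-order bits of x, so a
   primitive that takes x itself is used (fyl2xp1 above), otherwise
   1 + x is formed and the plain logarithm taken (the fyl2x path).  */

static double
example_log1p (double x)
{
  if (fabs (x) < 0.29289321881345247561)
    return log1p (x);		/* stands in for fyl2xp1 */
  return log (1.0 + x);		/* stands in for fyl2x */
}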
16607 /* Solaris named-section hook. Parameters are as for
16608 named_section_real. */
16611 i386_solaris_elf_named_section (const char *name, unsigned int flags,
16614 /* With Binutils 2.15, the "@unwind" marker must be specified on
16615 every occurrence of the ".eh_frame" section, not just the first one. */
16618 && strcmp (name, ".eh_frame") == 0)
16620 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
16621 flags & SECTION_WRITE ? "aw" : "a");
16624 default_elf_asm_named_section (name, flags, decl);
16627 #include "gt-i386.h"