/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "tree-gimple.h"
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
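
/* Illustrative sketch (field names per the processor_costs definition in
   i386.h, shown here only for orientation): the per-mode cost arrays
   below carry five entries, one each for QImode..DImode plus the
   fallback slot 4, and are meant to be indexed as, e.g.,
   ix86_cost->divide[MODE_INDEX (mode)].  */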

/* Processor costs (relative to an add) */
struct processor_costs size_cost = {	/* costs for tuning for size */
  2,				/* cost of an add instruction */
  3,				/* cost of a lea instruction */
  2,				/* variable shift costs */
  3,				/* constant shift costs */
  {3, 3, 3, 3, 5},		/* cost of starting a multiply */
  0,				/* cost of multiply per each bit set */
  {3, 3, 3, 3, 5},		/* cost of a divide/mod */
  3,				/* cost of movsx */
  3,				/* cost of movzx */
  2,				/* cost for loading QImode using movzbl */
  {2, 2, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 2, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 2},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {2, 2, 2},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  3,				/* cost of moving MMX register */
  {3, 3},			/* cost of loading MMX registers
				   in SImode and DImode */
  {3, 3},			/* cost of storing MMX registers
				   in SImode and DImode */
  3,				/* cost of moving SSE register */
  {3, 3, 3},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {3, 3, 3},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  2,				/* cost of FADD and FSUB insns.  */
  2,				/* cost of FMUL instruction.  */
  2,				/* cost of FDIV instruction.  */
  2,				/* cost of FABS instruction.  */
  2,				/* cost of FCHS instruction.  */
  2,				/* cost of FSQRT instruction.  */
};

/* Processor costs (relative to an add) */
struct processor_costs i386_cost = {	/* 386 specific costs */
  1,				/* cost of an add instruction */
  1,				/* cost of a lea instruction */
  3,				/* variable shift costs */
  2,				/* constant shift costs */
  {6, 6, 6, 6, 6},		/* cost of starting a multiply */
  1,				/* cost of multiply per each bit set */
  {23, 23, 23, 23, 23},		/* cost of a divide/mod */
  3,				/* cost of movsx */
  2,				/* cost of movzx */
  15,				/* "large" insn */
  4,				/* cost for loading QImode using movzbl */
  {2, 4, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 4, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {8, 8, 8},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {8, 8, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {4, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 8, 16},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 8, 16},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  23,				/* cost of FADD and FSUB insns.  */
  27,				/* cost of FMUL instruction.  */
  88,				/* cost of FDIV instruction.  */
  22,				/* cost of FABS instruction.  */
  24,				/* cost of FCHS instruction.  */
  122,				/* cost of FSQRT instruction.  */
};

struct processor_costs i486_cost = {	/* 486 specific costs */
  1,				/* cost of an add instruction */
  1,				/* cost of a lea instruction */
  3,				/* variable shift costs */
  2,				/* constant shift costs */
  {12, 12, 12, 12, 12},		/* cost of starting a multiply */
  1,				/* cost of multiply per each bit set */
  {40, 40, 40, 40, 40},		/* cost of a divide/mod */
  3,				/* cost of movsx */
  2,				/* cost of movzx */
  15,				/* "large" insn */
  4,				/* cost for loading QImode using movzbl */
  {2, 4, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 4, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {8, 8, 8},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {8, 8, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {4, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 8, 16},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 8, 16},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  8,				/* cost of FADD and FSUB insns.  */
  16,				/* cost of FMUL instruction.  */
  73,				/* cost of FDIV instruction.  */
  3,				/* cost of FABS instruction.  */
  3,				/* cost of FCHS instruction.  */
  83,				/* cost of FSQRT instruction.  */
};

struct processor_costs pentium_cost = {
  1,				/* cost of an add instruction */
  1,				/* cost of a lea instruction */
  4,				/* variable shift costs */
  1,				/* constant shift costs */
  {11, 11, 11, 11, 11},		/* cost of starting a multiply */
  0,				/* cost of multiply per each bit set */
  {25, 25, 25, 25, 25},		/* cost of a divide/mod */
  3,				/* cost of movsx */
  2,				/* cost of movzx */
  8,				/* "large" insn */
  6,				/* cost for loading QImode using movzbl */
  {2, 4, 2},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 4, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 6},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  8,				/* cost of moving MMX register */
  {8, 8},			/* cost of loading MMX registers
				   in SImode and DImode */
  {8, 8},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 8, 16},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 8, 16},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  0,				/* size of prefetch block */
  0,				/* number of parallel prefetches */
  3,				/* cost of FADD and FSUB insns.  */
  3,				/* cost of FMUL instruction.  */
  39,				/* cost of FDIV instruction.  */
  1,				/* cost of FABS instruction.  */
  1,				/* cost of FCHS instruction.  */
  70,				/* cost of FSQRT instruction.  */
};

struct processor_costs pentiumpro_cost = {
  1,				/* cost of an add instruction */
  1,				/* cost of a lea instruction */
  1,				/* variable shift costs */
  1,				/* constant shift costs */
  {4, 4, 4, 4, 4},		/* cost of starting a multiply */
  0,				/* cost of multiply per each bit set */
  {17, 17, 17, 17, 17},		/* cost of a divide/mod */
  1,				/* cost of movsx */
  1,				/* cost of movzx */
  8,				/* "large" insn */
  2,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 2, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 6},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {2, 2},			/* cost of loading MMX registers
				   in SImode and DImode */
  {2, 2},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {2, 2, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {2, 2, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  3,				/* MMX or SSE register to integer */
  32,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  3,				/* cost of FADD and FSUB insns.  */
  5,				/* cost of FMUL instruction.  */
  56,				/* cost of FDIV instruction.  */
  2,				/* cost of FABS instruction.  */
  2,				/* cost of FCHS instruction.  */
  56,				/* cost of FSQRT instruction.  */
};

struct processor_costs k6_cost = {
  1,				/* cost of an add instruction */
  2,				/* cost of a lea instruction */
  1,				/* variable shift costs */
  1,				/* constant shift costs */
  {3, 3, 3, 3, 3},		/* cost of starting a multiply */
  0,				/* cost of multiply per each bit set */
  {18, 18, 18, 18, 18},		/* cost of a divide/mod */
  2,				/* cost of movsx */
  2,				/* cost of movzx */
  8,				/* "large" insn */
  3,				/* cost for loading QImode using movzbl */
  {4, 5, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 3, 2},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {6, 6, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 4},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {2, 2},			/* cost of loading MMX registers
				   in SImode and DImode */
  {2, 2},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {2, 2, 8},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {2, 2, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  6,				/* MMX or SSE register to integer */
  32,				/* size of prefetch block */
  1,				/* number of parallel prefetches */
  2,				/* cost of FADD and FSUB insns.  */
  2,				/* cost of FMUL instruction.  */
  56,				/* cost of FDIV instruction.  */
  2,				/* cost of FABS instruction.  */
  2,				/* cost of FCHS instruction.  */
  56,				/* cost of FSQRT instruction.  */
};

struct processor_costs athlon_cost = {
  1,				/* cost of an add instruction */
  2,				/* cost of a lea instruction */
  1,				/* variable shift costs */
  1,				/* constant shift costs */
  {5, 5, 5, 5, 5},		/* cost of starting a multiply */
  0,				/* cost of multiply per each bit set */
  {18, 26, 42, 74, 74},		/* cost of a divide/mod */
  1,				/* cost of movsx */
  1,				/* cost of movzx */
  8,				/* "large" insn */
  4,				/* cost for loading QImode using movzbl */
  {3, 4, 3},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {3, 4, 3},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {4, 4, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {4, 4},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 4},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 4, 6},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 4, 5},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  4,				/* cost of FADD and FSUB insns.  */
  4,				/* cost of FMUL instruction.  */
  24,				/* cost of FDIV instruction.  */
  2,				/* cost of FABS instruction.  */
  2,				/* cost of FCHS instruction.  */
  35,				/* cost of FSQRT instruction.  */
};

struct processor_costs k8_cost = {
  1,				/* cost of an add instruction */
  2,				/* cost of a lea instruction */
  1,				/* variable shift costs */
  1,				/* constant shift costs */
  {3, 4, 3, 4, 5},		/* cost of starting a multiply */
  0,				/* cost of multiply per each bit set */
  {18, 26, 42, 74, 74},		/* cost of a divide/mod */
  1,				/* cost of movsx */
  1,				/* cost of movzx */
  8,				/* "large" insn */
  4,				/* cost for loading QImode using movzbl */
  {3, 4, 3},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {3, 4, 3},			/* cost of storing integer registers */
  4,				/* cost of reg,reg fld/fst */
  {4, 4, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {6, 6, 8},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {3, 3},			/* cost of loading MMX registers
				   in SImode and DImode */
  {4, 4},			/* cost of storing MMX registers
				   in SImode and DImode */
  2,				/* cost of moving SSE register */
  {4, 3, 6},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {4, 4, 5},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  5,				/* MMX or SSE register to integer */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  4,				/* cost of FADD and FSUB insns.  */
  4,				/* cost of FMUL instruction.  */
  19,				/* cost of FDIV instruction.  */
  2,				/* cost of FABS instruction.  */
  2,				/* cost of FCHS instruction.  */
  35,				/* cost of FSQRT instruction.  */
};

struct processor_costs pentium4_cost = {
  1,				/* cost of an add instruction */
  3,				/* cost of a lea instruction */
  4,				/* variable shift costs */
  4,				/* constant shift costs */
  {15, 15, 15, 15, 15},		/* cost of starting a multiply */
  0,				/* cost of multiply per each bit set */
  {56, 56, 56, 56, 56},		/* cost of a divide/mod */
  1,				/* cost of movsx */
  1,				/* cost of movzx */
  16,				/* "large" insn */
  2,				/* cost for loading QImode using movzbl */
  {4, 5, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {2, 3, 2},			/* cost of storing integer registers */
  2,				/* cost of reg,reg fld/fst */
  {2, 2, 6},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 6},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  2,				/* cost of moving MMX register */
  {2, 2},			/* cost of loading MMX registers
				   in SImode and DImode */
  {2, 2},			/* cost of storing MMX registers
				   in SImode and DImode */
  12,				/* cost of moving SSE register */
  {12, 12, 12},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {2, 2, 8},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  10,				/* MMX or SSE register to integer */
  64,				/* size of prefetch block */
  6,				/* number of parallel prefetches */
  5,				/* cost of FADD and FSUB insns.  */
  7,				/* cost of FMUL instruction.  */
  43,				/* cost of FDIV instruction.  */
  2,				/* cost of FABS instruction.  */
  2,				/* cost of FCHS instruction.  */
  43,				/* cost of FSQRT instruction.  */
};

struct processor_costs nocona_cost = {
  1,				/* cost of an add instruction */
  1,				/* cost of a lea instruction */
  1,				/* variable shift costs */
  1,				/* constant shift costs */
  {10, 10, 10, 10, 10},		/* cost of starting a multiply */
  0,				/* cost of multiply per each bit set */
  {66, 66, 66, 66, 66},		/* cost of a divide/mod */
  1,				/* cost of movsx */
  1,				/* cost of movzx */
  16,				/* "large" insn */
  4,				/* cost for loading QImode using movzbl */
  {4, 4, 4},			/* cost of loading integer registers
				   in QImode, HImode and SImode.
				   Relative to reg-reg move (2).  */
  {4, 4, 4},			/* cost of storing integer registers */
  3,				/* cost of reg,reg fld/fst */
  {12, 12, 12},			/* cost of loading fp registers
				   in SFmode, DFmode and XFmode */
  {4, 4, 4},			/* cost of storing fp registers
				   in SFmode, DFmode and XFmode */
  6,				/* cost of moving MMX register */
  {12, 12},			/* cost of loading MMX registers
				   in SImode and DImode */
  {12, 12},			/* cost of storing MMX registers
				   in SImode and DImode */
  6,				/* cost of moving SSE register */
  {12, 12, 12},			/* cost of loading SSE registers
				   in SImode, DImode and TImode */
  {12, 12, 12},			/* cost of storing SSE registers
				   in SImode, DImode and TImode */
  8,				/* MMX or SSE register to integer */
  128,				/* size of prefetch block */
  8,				/* number of parallel prefetches */
  6,				/* cost of FADD and FSUB insns.  */
  8,				/* cost of FMUL instruction.  */
  40,				/* cost of FDIV instruction.  */
  3,				/* cost of FABS instruction.  */
  3,				/* cost of FCHS instruction.  */
  44,				/* cost of FSQRT instruction.  */
};

const struct processor_costs *ix86_cost = &pentium_cost;
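
/* How these tables are consumed, in brief: override_options below
   repoints ix86_cost at the table matching -mtune (or at size_cost
   under -Os), and the rtx-cost hooks then read individual fields,
   e.g. COSTS_N_INSNS (ix86_cost->add) for a PLUS.  */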

/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_K6 (1<<PROCESSOR_K6)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
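
/* Minimal sketch of how the bitmasks below are consumed; TUNEMASK and
   the TARGET_* wrappers are assumed to live in i386.h, roughly:

     #define TUNEMASK (1 << ix86_tune)
     #define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)

   so a tuning flag is active when the bit of the CPU selected by
   -mtune is set in the corresponding x86_* constant.  */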

const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_zero_extend_with_and = m_486 | m_PENT;
const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
const int x86_double_with_add = ~m_386;
const int x86_use_bit_test = m_386;
const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_fisttp = m_NOCONA;
const int x86_3dnow_a = m_ATHLON_K8;
const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
/* Branch hints were put in P4 based on simulation results, but after
   P4 was made no performance benefit was observed with branch hints;
   they also increase code size.  As a result, icc never generates
   branch hints.  */
const int x86_branch_hints = 0;
const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
const int x86_partial_reg_stall = m_PPRO;
const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
const int x86_use_mov0 = m_K6;
const int x86_use_cltd = ~(m_PENT | m_K6);
const int x86_read_modify_write = ~m_PENT;
const int x86_read_modify = ~(m_PENT | m_PPRO);
const int x86_split_long_moves = m_PPRO;
const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
const int x86_qimode_math = ~(0);
const int x86_promote_qi_regs = 0;
const int x86_himode_math = ~(m_PPRO);
const int x86_promote_hi_regs = m_PPRO;
const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
const int x86_decompose_lea = m_PENT4 | m_NOCONA;
const int x86_shift1 = ~m_486;
const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
/* Set for machines where the type and dependencies are resolved on SSE
   register parts instead of whole registers, so we may maintain just
   lower part of scalar values in proper format leaving the upper part
   undefined.  */
const int x86_sse_split_regs = m_ATHLON_K8;
const int x86_sse_typeless_stores = m_ATHLON_K8;
const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
const int x86_use_ffreep = m_ATHLON_K8;
const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;

/* ??? Allowing interunit moves makes it all too easy for the compiler to put
   integer data in xmm registers, which results in pretty abysmal code.  */
const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;

const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
/* Some CPU cores are not able to predict more than 4 branch instructions in
   the 16-byte window.  */
const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
const int x86_use_bt = m_ATHLON_K8;
/* Compare and exchange was added for 80486.  */
const int x86_cmpxchg = ~m_386;
/* Exchange and add was added for 80486.  */
const int x86_xadd = ~m_386;

/* In case the average insn count for single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue code.  */
#define FAST_PROLOGUE_INSN_COUNT 20

/* Names for 8 (low), 8 (high), and 16-bit registers, respectively.  */
static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
static const char *const hi_reg_name[] = HI_REGISTER_NAMES;

/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  /* ax, dx, cx, bx */
  AREG, DREG, CREG, BREG,
  /* si, di, bp, sp */
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  /* FP registers */
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* arg pointer */
  NON_Q_REGS,
  /* flags, fpsr, dirflag, frame */
  NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  MMX_REGS, MMX_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS,
};
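
/* For example, REGNO_REG_CLASS (0) is AREG (%eax) and
   REGNO_REG_CLASS (1) is DREG (%edx), following gcc's hard register
   order ax, dx, cx, bx, si, di, bp, sp used above.  */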

/* The "default" register map used in 32-bit mode.  */

int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5,		/* general regs */
  12, 13, 14, 15, 16, 17, 18, 19,	/* fp regs */
  -1, -1, -1, -1, -1,			/* arg, flags, fpsr, dir, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE */
  29, 30, 31, 32, 33, 34, 35, 36,	/* MMX */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
};

static int const x86_64_int_parameter_registers[6] =
{
  5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
  FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
};

static int const x86_64_int_return_registers[4] =
{
  0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
};

/* The "default" register map used in 64-bit mode.  */

int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7,		/* general regs */
  33, 34, 35, 36, 37, 38, 39, 40,	/* fp regs */
  -1, -1, -1, -1, -1,			/* arg, flags, fpsr, dir, frame */
  17, 18, 19, 20, 21, 22, 23, 24,	/* SSE */
  41, 42, 43, 44, 45, 46, 47, 48,	/* MMX */
  8, 9, 10, 11, 12, 13, 14, 15,		/* extended integer registers */
  25, 26, 27, 28, 29, 30, 31, 32,	/* extended SSE registers */
};

/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believes these numbers have these meanings.
	8  for %eip    (no gcc equivalent)
	9  for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 is so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.
   The version of x86/svr4 SDB I have at the moment does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seems to under-
   stand that it should say that a variable lives in %st(0) (when
   asked via an `=' command) if we said it was in DWARF regno 11,
   but SDB still prints garbage when asked for the value of the
   variable in question (via a `/' command).
   (Also note that the labels SDB prints for various FP stack regs
   when doing an `x' command are all wrong.)
   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.
   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers:
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
*/
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4,		/* general regs */
  11, 12, 13, 14, 15, 16, 17, 18,	/* fp regs */
  -1, 9, -1, -1, -1,			/* arg, flags, fpsr, dir, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE registers */
  29, 30, 31, 32, 33, 34, 35, 36,	/* MMX registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
};

/* Test and compare insns in i386.md store the information needed to
   generate branch and scc insns here.  */

rtx ix86_compare_op0 = NULL_RTX;
rtx ix86_compare_op1 = NULL_RTX;
rtx ix86_compare_emitted = NULL_RTX;

/* Size of the register save area.  */
#define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
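
/* Worked example, assuming the usual x86-64 values REGPARM_MAX == 6,
   UNITS_PER_WORD == 8 and SSE_REGPARM_MAX == 8:
   6*8 + 8*16 == 176 bytes, the register save area size mandated by
   the x86-64 psABI for varargs functions.  */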

/* Define the structure for the machine field in struct function.  */

struct stack_local_entry GTY(())
{
  unsigned short mode;
  unsigned short n;
  rtx rtl;
  struct stack_local_entry *next;
};

/* Structure describing stack frame layout.
   Stack grows downward: the incoming arguments end at ARG_POINTER,
   below them sit the saved return address and, if frame_pointer_needed,
   the saved frame pointer at HARD_FRAME_POINTER; the saved registers,
   locals and padding then form the to_allocate area, which ends at
   FRAME_POINTER.  */
struct ix86_frame
{
  int outgoing_arguments_size;

  HOST_WIDE_INT to_allocate;
  /* The offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT hard_frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* When save_regs_using_mov is set, emit prologue using
     move instead of push instructions.  */
  bool save_regs_using_mov;
};

/* Code model option.  */
enum cmodel ix86_cmodel;
/* Asm dialect.  */
enum asm_dialect ix86_asm_dialect = ASM_ATT;
/* TLS dialects.  */
enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;

/* Which unit we are generating floating point math for.  */
enum fpmath_unit ix86_fpmath;

/* Which CPU we are scheduling for.  */
enum processor_type ix86_tune;
/* Which instruction set architecture to use.  */
enum processor_type ix86_arch;

/* True if the SSE prefetch instruction is not a NOP.  */
int x86_prefetch_sse;

/* ix86_regparm_string as a number.  */
static int ix86_regparm;

/* Preferred alignment for stack boundary in bits.  */
unsigned int ix86_preferred_stack_boundary;

/* Values 1-5: see jump.c */
int ix86_branch_cost;

/* Variables which are this size or smaller are put in the data/bss
   or ldata/lbss sections.  */
int ix86_section_threshold = 65536;

/* Prefix built by ASM_GENERATE_INTERNAL_LABEL.  */
char internal_label_prefix[16];
int internal_label_prefix_len;

static bool ix86_handle_option (size_t, const char *, int);
static void output_pic_addr_const (FILE *, rtx, int);
static void put_condition_code (enum rtx_code, enum machine_mode,
				int, FILE *);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
						   rtx *);
static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
						   enum machine_mode);
static rtx get_thread_pointer (int);
static rtx legitimize_tls_address (rtx, enum tls_model, int);
static void get_pc_thunk_name (char [32], unsigned int);
static rtx gen_push (rtx);
static int ix86_flags_dependant (rtx, rtx, enum attr_type);
static int ix86_agi_dependant (rtx, rtx, enum attr_type);
static struct machine_function * ix86_init_machine_status (void);
static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
static int ix86_nsaved_regs (void);
static void ix86_emit_save_regs (void);
static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
static HOST_WIDE_INT ix86_GOT_alias_set (void);
static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
static rtx ix86_expand_aligntest (rtx, int);
static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
static int ix86_issue_rate (void);
static int ix86_adjust_cost (rtx, rtx, rtx, int);
static int ia32_multipass_dfa_lookahead (void);
static void ix86_init_mmx_sse_builtins (void);
static rtx x86_this_parameter (tree);
static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				 HOST_WIDE_INT, tree);
static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void x86_file_start (void);
static void ix86_reorg (void);
static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
static tree ix86_build_builtin_va_list (void);
static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					 tree, int *, int);
static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
static bool ix86_vector_mode_supported_p (enum machine_mode);

static int ix86_address_cost (rtx);
static bool ix86_cannot_force_const_mem (rtx);
static rtx ix86_delegitimize_address (rtx);

static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;

struct builtin_description;
static rtx ix86_expand_sse_comi (const struct builtin_description *,
				 tree, rtx);
static rtx ix86_expand_sse_compare (const struct builtin_description *,
				    tree, rtx);
static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_store_builtin (enum insn_code, tree);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
static int ix86_fp_comparison_cost (enum rtx_code code);
static unsigned int ix86_select_alt_pic_regnum (void);
static int ix86_save_reg (unsigned int, int);
static void ix86_compute_frame_layout (struct ix86_frame *);
static int ix86_comp_type_attributes (tree, tree);
static int ix86_function_regparm (tree, tree);
const struct attribute_spec ix86_attribute_table[];
static bool ix86_function_ok_for_sibcall (tree, tree);
static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
static int ix86_value_regno (enum machine_mode, tree, tree);
static bool contains_128bit_aligned_vector_p (tree);
static rtx ix86_struct_value_rtx (tree, int);
static bool ix86_ms_bitfield_layout_p (tree);
static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
static int extended_reg_mentioned_1 (rtx *, void *);
static bool ix86_rtx_costs (rtx, int, int, int *);
static int min_insn_size (rtx);
static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				    tree, bool);
static void ix86_init_builtins (void);
static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static const char *ix86_mangle_fundamental_type (tree);
static tree ix86_stack_protect_fail (void);
static rtx ix86_internal_arg_pointer (void);
static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);

/* This function is only used on Solaris.  */
static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
     ATTRIBUTE_UNUSED;

/* Register class used for passing given 64bit part of the argument.
   These represent classes as documented by the PS ABI, with the exception
   of SSESF, SSEDF classes, that are basically SSE class, just gcc will
   use SF or DFmode move instead of DImode to avoid reformatting penalties.

   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
   whenever possible (upper half does contain padding).  */
enum x86_64_reg_class
  {
    X86_64_NO_CLASS,
    X86_64_INTEGER_CLASS,
    X86_64_INTEGERSI_CLASS,
    X86_64_SSE_CLASS,
    X86_64_SSESF_CLASS,
    X86_64_SSEDF_CLASS,
    X86_64_SSEUP_CLASS,
    X86_64_X87_CLASS,
    X86_64_X87UP_CLASS,
    X86_64_COMPLEX_X87_CLASS,
    X86_64_MEMORY_CLASS
  };
static const char * const x86_64_reg_class_name[] = {
  "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
  "sseup", "x87", "x87up", "cplx87", "no"
};

#define MAX_CLASSES 4

/* Table of constants used by fldpi, fldln2, etc....  */
static REAL_VALUE_TYPE ext_80387_constants_table [5];
static bool ext_80387_constants_init = 0;
static void init_ext_80387_constants (void);
static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
static void x86_64_elf_select_section (tree decl, int reloc,
				       unsigned HOST_WIDE_INT align)
     ATTRIBUTE_UNUSED;

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
# undef TARGET_MERGE_DECL_ATTRIBUTES
# define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS	\
  (TARGET_DEFAULT			\
   | TARGET_64BIT_DEFAULT		\
   | TARGET_SUBTARGET_DEFAULT		\
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ix86_handle_option

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
#define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

struct gcc_target targetm = TARGET_INITIALIZER;

/* The svr4 ABI for the i386 says that records and unions are returned
   in memory.  */
#ifndef DEFAULT_PCC_STRUCT_RETURN
#define DEFAULT_PCC_STRUCT_RETURN 1
#endif

/* Implement TARGET_HANDLE_OPTION.  */

static bool
ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
{
  switch (code)
    {
    case OPT_m3dnow:
      if (!value)
	{
	  target_flags &= ~MASK_3DNOW_A;
	  target_flags_explicit |= MASK_3DNOW_A;
	}
      return true;

    case OPT_mmmx:
      if (!value)
	{
	  target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
	  target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
	}
      return true;

    case OPT_msse:
      if (!value)
	{
	  target_flags &= ~(MASK_SSE2 | MASK_SSE3);
	  target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
	}
      return true;

    case OPT_msse2:
      if (!value)
	{
	  target_flags &= ~MASK_SSE3;
	  target_flags_explicit |= MASK_SSE3;
	}
      return true;

    default:
      return true;
    }
}

/* Sometimes certain combinations of command options do not make
   sense on a particular target machine.  You can define a macro
   `OVERRIDE_OPTIONS' to take account of this.  This macro, if
   defined, is executed once just after all the command options have
   been parsed.

   Don't use this macro to turn on various extra optimizations for
   `-O'.  That is what `OPTIMIZATION_OPTIONS' is for.  */

void
override_options (void)
{
  int i;
  int ix86_tune_defaulted = 0;

  /* Comes from final.c -- no real reason to change it.  */
#define MAX_CODE_ALIGN 16

  static struct ptt
    {
      const struct processor_costs *cost;	/* Processor costs */
      const int target_enable;			/* Target flags to enable.  */
      const int target_disable;			/* Target flags to disable.  */
      const int align_loop;			/* Default alignments.  */
      const int align_loop_max_skip;
      const int align_jump;
      const int align_jump_max_skip;
      const int align_func;
    }
  const processor_target_table[PROCESSOR_max] =
    {
      {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
      {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
      {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
      {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
      {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
      {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
      {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
      {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
      {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
    };

  static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
  static struct pta
    {
      const char *const name;		/* processor name or nickname.  */
      const enum processor_type processor;
      const enum pta_flags
	{
	  PTA_SSE = 1,
	  PTA_SSE2 = 2,
	  PTA_SSE3 = 4,
	  PTA_MMX = 8,
	  PTA_PREFETCH_SSE = 16,
	  PTA_3DNOW = 32,
	  PTA_3DNOW_A = 64,
	  PTA_64BIT = 128
	} flags;
    }
  const processor_alias_table[] =
    {
      {"i386", PROCESSOR_I386, 0},
      {"i486", PROCESSOR_I486, 0},
      {"i586", PROCESSOR_PENTIUM, 0},
      {"pentium", PROCESSOR_PENTIUM, 0},
      {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
      {"winchip-c6", PROCESSOR_I486, PTA_MMX},
      {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
      {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
      {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
      {"i686", PROCESSOR_PENTIUMPRO, 0},
      {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
      {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
      {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
      {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
      {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
      {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
				       | PTA_MMX | PTA_PREFETCH_SSE},
      {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
					| PTA_MMX | PTA_PREFETCH_SSE},
      {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
				     | PTA_MMX | PTA_PREFETCH_SSE},
      {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
				   | PTA_MMX | PTA_PREFETCH_SSE},
      {"k6", PROCESSOR_K6, PTA_MMX},
      {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
      {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
      {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				   | PTA_3DNOW_A},
      {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
					 | PTA_3DNOW | PTA_3DNOW_A},
      {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				     | PTA_3DNOW_A | PTA_SSE},
      {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				      | PTA_3DNOW_A | PTA_SSE},
      {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				      | PTA_3DNOW_A | PTA_SSE},
      {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
			       | PTA_SSE | PTA_SSE2 },
      {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
			   | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
				| PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
				 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
				  | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
    };

  int const pta_size = ARRAY_SIZE (processor_alias_table);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* Set the default values for switches whose default depends on TARGET_64BIT
     in case they weren't overwritten by command line options.  */
  if (TARGET_64BIT)
    {
      if (flag_omit_frame_pointer == 2)
	flag_omit_frame_pointer = 1;
      if (flag_asynchronous_unwind_tables == 2)
	flag_asynchronous_unwind_tables = 1;
      if (flag_pcc_struct_return == 2)
	flag_pcc_struct_return = 0;
    }
  else
    {
      if (flag_omit_frame_pointer == 2)
	flag_omit_frame_pointer = 0;
      if (flag_asynchronous_unwind_tables == 2)
	flag_asynchronous_unwind_tables = 0;
      if (flag_pcc_struct_return == 2)
	flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
    }

  if (!ix86_tune_string && ix86_arch_string)
    ix86_tune_string = ix86_arch_string;
  if (!ix86_tune_string)
    {
      ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
      ix86_tune_defaulted = 1;
    }
  if (!ix86_arch_string)
    ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";

  if (ix86_cmodel_string != 0)
    {
      if (!strcmp (ix86_cmodel_string, "small"))
	ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
      else if (!strcmp (ix86_cmodel_string, "medium"))
	ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
      else if (flag_pic)
	sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
      else if (!strcmp (ix86_cmodel_string, "32"))
	ix86_cmodel = CM_32;
      else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
	ix86_cmodel = CM_KERNEL;
      else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
	ix86_cmodel = CM_LARGE;
      else
	error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
    }
  else
    {
      ix86_cmodel = CM_32;
      if (TARGET_64BIT)
	ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
    }
  if (ix86_asm_string != 0)
    {
      if (! TARGET_MACHO
	  && !strcmp (ix86_asm_string, "intel"))
	ix86_asm_dialect = ASM_INTEL;
      else if (!strcmp (ix86_asm_string, "att"))
	ix86_asm_dialect = ASM_ATT;
      else
	error ("bad value (%s) for -masm= switch", ix86_asm_string);
    }
  if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
    error ("code model %qs not supported in the %s bit mode",
	   ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
  if (ix86_cmodel == CM_LARGE)
    sorry ("code model %<large%> not supported yet");
  if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
    sorry ("%i-bit mode not compiled in",
	   (target_flags & MASK_64BIT) ? 64 : 32);

  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
      {
	ix86_arch = processor_alias_table[i].processor;
	/* Default cpu tuning to the architecture.  */
	ix86_tune = ix86_arch;
	if (processor_alias_table[i].flags & PTA_MMX
	    && !(target_flags_explicit & MASK_MMX))
	  target_flags |= MASK_MMX;
	if (processor_alias_table[i].flags & PTA_3DNOW
	    && !(target_flags_explicit & MASK_3DNOW))
	  target_flags |= MASK_3DNOW;
	if (processor_alias_table[i].flags & PTA_3DNOW_A
	    && !(target_flags_explicit & MASK_3DNOW_A))
	  target_flags |= MASK_3DNOW_A;
	if (processor_alias_table[i].flags & PTA_SSE
	    && !(target_flags_explicit & MASK_SSE))
	  target_flags |= MASK_SSE;
	if (processor_alias_table[i].flags & PTA_SSE2
	    && !(target_flags_explicit & MASK_SSE2))
	  target_flags |= MASK_SSE2;
	if (processor_alias_table[i].flags & PTA_SSE3
	    && !(target_flags_explicit & MASK_SSE3))
	  target_flags |= MASK_SSE3;
	if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
	  x86_prefetch_sse = true;
	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
	  error ("CPU you selected does not support x86-64 "
		 "instruction set");
	break;
      }

  if (i == pta_size)
    error ("bad value (%s) for -march= switch", ix86_arch_string);

  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
      {
	ix86_tune = processor_alias_table[i].processor;
	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
	  {
	    if (ix86_tune_defaulted)
	      {
		ix86_tune_string = "x86-64";
		for (i = 0; i < pta_size; i++)
		  if (! strcmp (ix86_tune_string,
				processor_alias_table[i].name))
		    break;
		ix86_tune = processor_alias_table[i].processor;
	      }
	    else
	      error ("CPU you selected does not support x86-64 "
		     "instruction set");
	  }
	/* Intel CPUs have always interpreted SSE prefetch instructions as
	   NOPs; so, we can enable SSE prefetch instructions even when
	   -mtune (rather than -march) points us to a processor that has them.
	   However, the VIA C3 gives a SIGILL, so we only do that for i686 and
	   higher processors.  */
	if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
	  x86_prefetch_sse = true;
	break;
      }
  if (i == pta_size)
    error ("bad value (%s) for -mtune= switch", ix86_tune_string);

  if (optimize_size)
    ix86_cost = &size_cost;
  else
    ix86_cost = processor_target_table[ix86_tune].cost;
  target_flags |= processor_target_table[ix86_tune].target_enable;
  target_flags &= ~processor_target_table[ix86_tune].target_disable;

  /* Arrange to set up i386_stack_locals for all functions.  */
  init_machine_status = ix86_init_machine_status;

  /* Validate -mregparm= value.  */
  if (ix86_regparm_string)
    {
      i = atoi (ix86_regparm_string);
      if (i < 0 || i > REGPARM_MAX)
	error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
      else
	ix86_regparm = i;
    }
  if (TARGET_64BIT)
    ix86_regparm = REGPARM_MAX;

  /* If the user has provided any of the -malign-* options,
     warn and use that value only if -falign-* is not set.
     Remove this code in GCC 3.2 or later.  */
  if (ix86_align_loops_string)
    {
      warning (0, "-malign-loops is obsolete, use -falign-loops");
      if (align_loops == 0)
	{
	  i = atoi (ix86_align_loops_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_loops = 1 << i;
	}
    }

  if (ix86_align_jumps_string)
    {
      warning (0, "-malign-jumps is obsolete, use -falign-jumps");
      if (align_jumps == 0)
	{
	  i = atoi (ix86_align_jumps_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_jumps = 1 << i;
	}
    }

  if (ix86_align_funcs_string)
    {
      warning (0, "-malign-functions is obsolete, use -falign-functions");
      if (align_functions == 0)
	{
	  i = atoi (ix86_align_funcs_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_functions = 1 << i;
	}
    }

  /* Default align_* from the processor table.  */
  if (align_loops == 0)
    {
      align_loops = processor_target_table[ix86_tune].align_loop;
      align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
    }
  if (align_jumps == 0)
    {
      align_jumps = processor_target_table[ix86_tune].align_jump;
      align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
    }
  if (align_functions == 0)
    {
      align_functions = processor_target_table[ix86_tune].align_func;
    }

  /* Validate -mpreferred-stack-boundary= value, or provide default.
     The default of 128 bits is for Pentium III's SSE __m128, but we
     don't want additional code to keep the stack aligned when
     optimizing for code size.  */
  ix86_preferred_stack_boundary = (optimize_size
				   ? TARGET_64BIT ? 128 : 32
				   : 128);
  if (ix86_preferred_stack_boundary_string)
    {
      i = atoi (ix86_preferred_stack_boundary_string);
      if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
	error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
	       TARGET_64BIT ? 4 : 2);
      else
	ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
    }
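
  /* Worked example: -mpreferred-stack-boundary=4 gives
     (1 << 4) * BITS_PER_UNIT == 128 bits, i.e. the 16-byte alignment
     __m128 needs; the 64-bit lower bound of 4 keeps the ABI-required
     16-byte alignment intact.  */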

  /* Validate -mbranch-cost= value, or provide default.  */
  ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
  if (ix86_branch_cost_string)
    {
      i = atoi (ix86_branch_cost_string);
      if (i < 0 || i > 5)
	error ("-mbranch-cost=%d is not between 0 and 5", i);
      else
	ix86_branch_cost = i;
    }
  if (ix86_section_threshold_string)
    {
      i = atoi (ix86_section_threshold_string);
      if (i < 0)
	error ("-mlarge-data-threshold=%d is negative", i);
      else
	ix86_section_threshold = i;
    }

  if (ix86_tls_dialect_string)
    {
      if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
	ix86_tls_dialect = TLS_DIALECT_GNU;
      else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
	ix86_tls_dialect = TLS_DIALECT_SUN;
      else
	error ("bad value (%s) for -mtls-dialect= switch",
	       ix86_tls_dialect_string);
    }

  /* Keep nonleaf frame pointers.  */
  if (flag_omit_frame_pointer)
    target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
  else if (TARGET_OMIT_LEAF_FRAME_POINTER)
    flag_omit_frame_pointer = 1;

  /* If we're doing fast math, we don't care about comparison order
     wrt NaNs.  This lets us use a shorter comparison sequence.  */
  if (flag_unsafe_math_optimizations)
    target_flags &= ~MASK_IEEE_FP;

  /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
     since the insns won't need emulation.  */
  if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
    target_flags &= ~MASK_NO_FANCY_MATH_387;

  /* Likewise, if the target doesn't have a 387, or we've specified
     software floating point, don't use 387 inline intrinsics.  */
  if (!TARGET_80387)
    target_flags |= MASK_NO_FANCY_MATH_387;

  /* Turn on SSE2 builtins for -msse3.  */
  if (TARGET_SSE3)
    target_flags |= MASK_SSE2;

  /* Turn on SSE builtins for -msse2.  */
  if (TARGET_SSE2)
    target_flags |= MASK_SSE;

  /* Turn on MMX builtins for -msse.  */
  if (TARGET_SSE)
    {
      target_flags |= MASK_MMX & ~target_flags_explicit;
      x86_prefetch_sse = true;
    }

  /* Turn on MMX builtins for 3Dnow.  */
  if (TARGET_3DNOW)
    target_flags |= MASK_MMX;
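
  /* Net effect of the cascade above: -msse3 implies -msse2, which
     implies -msse, which in turn enables MMX unless it was explicitly
     disabled; so plain -msse3 ends up with MASK_SSE3, MASK_SSE2,
     MASK_SSE and MASK_MMX all set.  */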

  if (TARGET_64BIT)
    {
      if (TARGET_ALIGN_DOUBLE)
	error ("-malign-double makes no sense in the 64bit mode");
      if (TARGET_RTD)
	error ("-mrtd calling convention not supported in the 64bit mode");

      /* Enable by default the SSE and MMX builtins.  Do allow the user to
	 explicitly disable any of these.  In particular, disabling SSE and
	 MMX for kernel code is extremely useful.  */
      target_flags
	|= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
	    & ~target_flags_explicit);
    }
  else
    {
      /* The i386 ABI does not specify a red zone.  It still makes sense
	 to use one when the programmer takes care to keep the stack from
	 being destroyed.  */
      if (!(target_flags_explicit & MASK_NO_RED_ZONE))
	target_flags |= MASK_NO_RED_ZONE;
    }

  /* Accept -msseregparm only if at least SSE support is enabled.  */
  if (TARGET_SSEREGPARM
      && ! TARGET_SSE)
    error ("-msseregparm used without SSE enabled");

  ix86_fpmath = TARGET_FPMATH_DEFAULT;
1620 if (ix86_fpmath_string != 0)
1622 if (! strcmp (ix86_fpmath_string, "387"))
1623 ix86_fpmath = FPMATH_387;
1624 else if (! strcmp (ix86_fpmath_string, "sse"))
warning (0, "SSE instruction set disabled, using 387 arithmetic");
1629 ix86_fpmath = FPMATH_387;
1632 ix86_fpmath = FPMATH_SSE;
1634 else if (! strcmp (ix86_fpmath_string, "387,sse")
1635 || ! strcmp (ix86_fpmath_string, "sse,387"))
warning (0, "SSE instruction set disabled, using 387 arithmetic");
1640 ix86_fpmath = FPMATH_387;
1642 else if (!TARGET_80387)
warning (0, "387 instruction set disabled, using SSE arithmetic");
1645 ix86_fpmath = FPMATH_SSE;
1648 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1651 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1654 /* If the i387 is disabled, then do not return values in it. */
1656 target_flags &= ~MASK_FLOAT_RETURNS;
1658 if ((x86_accumulate_outgoing_args & TUNEMASK)
1659 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1661 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1663 /* ??? Unwind info is not correct around the CFG unless either a frame
1664 pointer is present or M_A_O_A is set. Fixing this requires rewriting
1665 unwind info generation to be aware of the CFG and propagating states
1667 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
1668 || flag_exceptions || flag_non_call_exceptions)
1669 && flag_omit_frame_pointer
1670 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
1672 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1673 warning (0, "unwind tables currently require either a frame pointer "
1674 "or -maccumulate-outgoing-args for correctness");
1675 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1678 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1681 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1682 p = strchr (internal_label_prefix, 'X');
1683 internal_label_prefix_len = p - internal_label_prefix;
/* When the scheduling description is not available, disable the scheduler
   pass so it won't slow down the compilation and make x87 code slower.  */
1689 if (!TARGET_SCHEDULE)
1690 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
/* Switch to the appropriate section for output of DECL.
1694 DECL is either a `VAR_DECL' node or a constant of some sort.
1695 RELOC indicates whether forming the initial value of DECL requires
1696 link-time relocations. */
1699 x86_64_elf_select_section (tree decl, int reloc,
1700 unsigned HOST_WIDE_INT align)
1702 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1703 && ix86_in_large_data_p (decl))
1705 const char *sname = NULL;
1706 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1711 case SECCAT_DATA_REL:
1712 sname = ".ldata.rel";
1714 case SECCAT_DATA_REL_LOCAL:
1715 sname = ".ldata.rel.local";
1717 case SECCAT_DATA_REL_RO:
1718 sname = ".ldata.rel.ro";
1720 case SECCAT_DATA_REL_RO_LOCAL:
1721 sname = ".ldata.rel.ro.local";
1727 case SECCAT_RODATA_MERGE_STR:
1728 case SECCAT_RODATA_MERGE_STR_INIT:
1729 case SECCAT_RODATA_MERGE_CONST:
1732 case SECCAT_SRODATA:
/* We don't split these for the medium model.  Place them into
   default sections and hope for the best.  */
1745 named_section (decl, sname, reloc);
1749 default_elf_select_section (decl, reloc, align);
1752 /* Build up a unique section name, expressed as a
1753 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
1754 RELOC indicates whether the initial value of EXP requires
1755 link-time relocations. */
1758 x86_64_elf_unique_section (tree decl, int reloc)
1760 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1761 && ix86_in_large_data_p (decl))
1763 const char *prefix = NULL;
1764 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
1765 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
1767 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1770 case SECCAT_DATA_REL:
1771 case SECCAT_DATA_REL_LOCAL:
1772 case SECCAT_DATA_REL_RO:
1773 case SECCAT_DATA_REL_RO_LOCAL:
1774 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
1777 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
1780 case SECCAT_RODATA_MERGE_STR:
1781 case SECCAT_RODATA_MERGE_STR_INIT:
1782 case SECCAT_RODATA_MERGE_CONST:
1783 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
1785 case SECCAT_SRODATA:
/* We don't split these for the medium model.  Place them into
   default sections and hope for the best.  */
1801 plen = strlen (prefix);
1803 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
1804 name = targetm.strip_name_encoding (name);
1805 nlen = strlen (name);
1807 string = alloca (nlen + plen + 1);
1808 memcpy (string, prefix, plen);
1809 memcpy (string + plen, name, nlen + 1);
1811 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
1815 default_unique_section (decl, reloc);
1818 #ifdef COMMON_ASM_OP
1819 /* This says how to output assembler code to declare an
1820 uninitialized external linkage data object.
For medium model x86-64 we need to use the .largecomm opcode for
   large objects.  */
1825 x86_elf_aligned_common (FILE *file,
1826 const char *name, unsigned HOST_WIDE_INT size,
1829 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1830 && size > (unsigned int)ix86_section_threshold)
1831 fprintf (file, ".largecomm\t");
1833 fprintf (file, "%s", COMMON_ASM_OP);
1834 assemble_name (file, name);
1835 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
1836 size, align / BITS_PER_UNIT);
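/* Illustrative sketch of the output (object name hypothetical): a 4 MB
   object under -mcmodel=medium with the default -mlarge-data-threshold
   and 32-byte alignment is emitted as

     .largecomm	big_array,4194304,32

   while smaller objects fall back to the plain COMMON_ASM_OP directive.  */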
1839 /* Utility function for targets to use in implementing
1840 ASM_OUTPUT_ALIGNED_BSS. */
1843 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
1844 const char *name, unsigned HOST_WIDE_INT size,
1847 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1848 && size > (unsigned int)ix86_section_threshold)
1849 named_section (decl, ".lbss", 0);
1852 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
1853 #ifdef ASM_DECLARE_OBJECT_NAME
1854 last_assemble_variable_decl = decl;
1855 ASM_DECLARE_OBJECT_NAME (file, name, decl);
/* The standard thing is to just output a label for the object.  */
1858 ASM_OUTPUT_LABEL (file, name);
1859 #endif /* ASM_DECLARE_OBJECT_NAME */
1860 ASM_OUTPUT_SKIP (file, size ? size : 1);
1865 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1867 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1868 make the problem with not enough registers even worse. */
1869 #ifdef INSN_SCHEDULING
1871 flag_schedule_insns = 0;
1875 /* The Darwin libraries never set errno, so we might as well
1876 avoid calling them when that's the only reason we would. */
1877 flag_errno_math = 0;
/* The default values of these switches depend on TARGET_64BIT,
   which is not known at this moment.  Mark these values with 2 and
   let the user override them.  If there is no command line option
   specifying them, we will set the defaults in override_options.  */
1884 flag_omit_frame_pointer = 2;
1885 flag_pcc_struct_return = 2;
1886 flag_asynchronous_unwind_tables = 2;
1887 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
1888 SUBTARGET_OPTIMIZATION_OPTIONS;
1892 /* Table of valid machine attributes. */
1893 const struct attribute_spec ix86_attribute_table[] =
1895 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1896 /* Stdcall attribute says callee is responsible for popping arguments
1897 if they are not variable. */
1898 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1899 /* Fastcall attribute says callee is responsible for popping arguments
1900 if they are not variable. */
1901 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1902 /* Cdecl attribute says the callee is a normal C declaration */
1903 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1904 /* Regparm attribute specifies how many integer arguments are to be
1905 passed in registers. */
1906 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
1907 /* Sseregparm attribute says we are using x86_64 calling conventions
1908 for FP arguments. */
1909 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1910 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1911 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1912 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1913 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1915 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1916 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1917 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1918 SUBTARGET_ATTRIBUTE_TABLE,
1920 { NULL, 0, 0, false, false, false, NULL }
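/* Illustrative uses of the calling-convention attributes above (a
   hypothetical user-level sketch, not part of this table):

     int __attribute__((stdcall))  f (int a, int b);   callee pops 8 bytes
     int __attribute__((fastcall)) g (int a, int b);   a in %ecx, b in %edx
     int __attribute__((regparm(3))) h (int a, int b, int c);
                                        a, b, c in %eax, %edx, %ecx  */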
1923 /* Decide whether we can make a sibling call to a function. DECL is the
1924 declaration of the function being targeted by the call and EXP is the
1925 CALL_EXPR representing the call. */
1928 ix86_function_ok_for_sibcall (tree decl, tree exp)
1933 /* If we are generating position-independent code, we cannot sibcall
1934 optimize any indirect call, or a direct call to a global function,
1935 as the PLT requires %ebx be live. */
1936 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1943 func = TREE_TYPE (TREE_OPERAND (exp, 0));
1944 if (POINTER_TYPE_P (func))
1945 func = TREE_TYPE (func);
/* Check that the return value locations are the same.  For example,
   if we are returning floats on the 80387 register stack, we cannot
1950 make a sibcall from a function that doesn't return a float to a
1951 function that does or, conversely, from a function that does return
1952 a float to a function that doesn't; the necessary stack adjustment
1953 would not be executed. This is also the place we notice
1954 differences in the return value ABI. Note that it is ok for one
1955 of the functions to have void return type as long as the return
1956 value of the other is passed in a register. */
1957 a = ix86_function_value (TREE_TYPE (exp), func, false);
1958 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
1960 if (STACK_REG_P (a) || STACK_REG_P (b))
1962 if (!rtx_equal_p (a, b))
1965 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1967 else if (!rtx_equal_p (a, b))
1970 /* If this call is indirect, we'll need to be able to use a call-clobbered
1971 register for the address of the target function. Make sure that all
1972 such registers are not used for passing parameters. */
1973 if (!decl && !TARGET_64BIT)
1977 /* We're looking at the CALL_EXPR, we need the type of the function. */
1978 type = TREE_OPERAND (exp, 0); /* pointer expression */
1979 type = TREE_TYPE (type); /* pointer type */
1980 type = TREE_TYPE (type); /* function type */
1982 if (ix86_function_regparm (type, NULL) >= 3)
1984 /* ??? Need to count the actual number of registers to be used,
1985 not the possible number of registers. Fix later. */
1990 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1991 /* Dllimport'd functions are also called indirectly. */
1992 if (decl && DECL_DLLIMPORT_P (decl)
1993 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
/* If we force-aligned the stack, then sibcalling would unalign the
   stack, which may break the called function.  */
1999 if (cfun->machine->force_align_arg_pointer)
2002 /* Otherwise okay. That also includes certain types of indirect calls. */
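/* For instance (illustrative): with -fpic on ia32, a tail call to a global
   function is rejected above because the PLT sequence requires %ebx to hold
   the GOT pointer, while a call to a local static function can still be
   turned into a sibcall.  */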
2006 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
2007 calling convention attributes;
2008 arguments as in struct attribute_spec.handler. */
2011 ix86_handle_cconv_attribute (tree *node, tree name,
2013 int flags ATTRIBUTE_UNUSED,
2016 if (TREE_CODE (*node) != FUNCTION_TYPE
2017 && TREE_CODE (*node) != METHOD_TYPE
2018 && TREE_CODE (*node) != FIELD_DECL
2019 && TREE_CODE (*node) != TYPE_DECL)
2021 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2022 IDENTIFIER_POINTER (name));
2023 *no_add_attrs = true;
2027 /* Can combine regparm with all attributes but fastcall. */
2028 if (is_attribute_p ("regparm", name))
2032 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2034 error ("fastcall and regparm attributes are not compatible");
2037 cst = TREE_VALUE (args);
2038 if (TREE_CODE (cst) != INTEGER_CST)
2040 warning (OPT_Wattributes,
2041 "%qs attribute requires an integer constant argument",
2042 IDENTIFIER_POINTER (name));
2043 *no_add_attrs = true;
2045 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2047 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2048 IDENTIFIER_POINTER (name), REGPARM_MAX);
2049 *no_add_attrs = true;
2057 warning (OPT_Wattributes, "%qs attribute ignored",
2058 IDENTIFIER_POINTER (name));
2059 *no_add_attrs = true;
2063 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2064 if (is_attribute_p ("fastcall", name))
2066 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2068 error ("fastcall and cdecl attributes are not compatible");
2070 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2072 error ("fastcall and stdcall attributes are not compatible");
2074 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2076 error ("fastcall and regparm attributes are not compatible");
2080 /* Can combine stdcall with fastcall (redundant), regparm and
2082 else if (is_attribute_p ("stdcall", name))
2084 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2086 error ("stdcall and cdecl attributes are not compatible");
2088 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2090 error ("stdcall and fastcall attributes are not compatible");
2094 /* Can combine cdecl with regparm and sseregparm. */
2095 else if (is_attribute_p ("cdecl", name))
2097 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2099 error ("stdcall and cdecl attributes are not compatible");
2101 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2103 error ("fastcall and cdecl attributes are not compatible");
2107 /* Can combine sseregparm with all attributes. */
2112 /* Return 0 if the attributes for two types are incompatible, 1 if they
2113 are compatible, and 2 if they are nearly compatible (which causes a
2114 warning to be generated). */
2117 ix86_comp_type_attributes (tree type1, tree type2)
2119 /* Check for mismatch of non-default calling convention. */
2120 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2122 if (TREE_CODE (type1) != FUNCTION_TYPE)
2125 /* Check for mismatched fastcall/regparm types. */
2126 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2127 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2128 || (ix86_function_regparm (type1, NULL)
2129 != ix86_function_regparm (type2, NULL)))
2132 /* Check for mismatched sseregparm types. */
2133 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2134 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2137 /* Check for mismatched return types (cdecl vs stdcall). */
2138 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2139 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
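/* A hypothetical mismatch caught above (illustrative sketch):

     void __attribute__((stdcall)) f (int);
     void (*p) (int) = f;    assignment to a plain (cdecl) pointer

   the two function types compare as incompatible, so the front end
   can diagnose the assignment.  */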
/* Return the regparm value for a function with the indicated TYPE and DECL.
   DECL may be NULL when calling the function indirectly
   or considering a libcall.  */
2150 ix86_function_regparm (tree type, tree decl)
2153 int regparm = ix86_regparm;
2154 bool user_convention = false;
2158 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2161 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2162 user_convention = true;
2165 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2168 user_convention = true;
2171 /* Use register calling convention for local functions when possible. */
2172 if (!TARGET_64BIT && !user_convention && decl
2173 && flag_unit_at_a_time && !profile_flag)
2175 struct cgraph_local_info *i = cgraph_local_info (decl);
2178 int local_regparm, globals = 0, regno;
/* Make sure no regparm register is taken by a global register
   variable.  */
2182 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2183 if (global_regs[local_regparm])
/* We can't use regparm(3) for nested functions as these use
   the static chain pointer in the third argument.  */
2187 if (local_regparm == 3
2188 && decl_function_context (decl)
2189 && !DECL_NO_STATIC_CHAIN (decl))
/* Each global register variable increases register pressure,
   so the more global reg vars there are, the less the regparm
   optimization can be used, unless requested by the user explicitly.  */
2194 for (regno = 0; regno < 6; regno++)
2195 if (global_regs[regno])
2198 = globals < local_regparm ? local_regparm - globals : 0;
2200 if (local_regparm > regparm)
2201 regparm = local_regparm;
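/* Illustrative sketch: a file-local function such as

     static int add3 (int a, int b, int c) { return a + b + c; }

   compiled with -funit-at-a-time, no profiling and no global register
   variables can be promoted to regparm(3) here, so a, b and c arrive
   in %eax, %edx and %ecx.  */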
/* Return 1 or 2 if we can pass up to 8 SFmode (1), or SFmode and DFmode (2),
   arguments in SSE registers for a function with the indicated TYPE and DECL.
   DECL may be NULL when calling the function indirectly
   or considering a libcall.  Otherwise return 0.  */
2214 ix86_function_sseregparm (tree type, tree decl)
2216 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2217 by the sseregparm attribute. */
2218 if (TARGET_SSEREGPARM
2220 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2225 error ("Calling %qD with attribute sseregparm without "
2226 "SSE/SSE2 enabled", decl);
2228 error ("Calling %qT with attribute sseregparm without "
2229 "SSE/SSE2 enabled", type);
/* For local functions, pass SFmode (and DFmode for SSE2) arguments
   in SSE registers even in 32-bit mode, and not just 3 but up to
   8 SSE arguments in registers.  */
2239 if (!TARGET_64BIT && decl
2240 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2242 struct cgraph_local_info *i = cgraph_local_info (decl);
2244 return TARGET_SSE2 ? 2 : 1;
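/* Illustrative sketch: with -msse2 -mfpmath=sse and -funit-at-a-time, a
   local function taking float or double arguments receives them in
   %xmm0-%xmm7 (the return value 2 above); with plain -msse only SFmode
   qualifies (return value 1).  */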
2250 /* Return true if EAX is live at the start of the function. Used by
2251 ix86_expand_prologue to determine if we need special help before
2252 calling allocate_stack_worker. */
2255 ix86_eax_live_at_start_p (void)
2257 /* Cheat. Don't bother working forward from ix86_function_regparm
2258 to the function type to whether an actual argument is located in
2259 eax. Instead just look at cfg info, which is still close enough
2260 to correct at this point. This gives false positives for broken
2261 functions that might use uninitialized data that happens to be
2262 allocated in eax, but who cares? */
2263 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2266 /* Value is the number of bytes of arguments automatically
2267 popped when returning from a subroutine call.
2268 FUNDECL is the declaration node of the function (as a tree),
2269 FUNTYPE is the data type of the function (as a tree),
2270 or for a library call it is an identifier node for the subroutine name.
2271 SIZE is the number of bytes of arguments passed on the stack.
2273 On the 80386, the RTD insn may be used to pop them if the number
2274 of args is fixed, but if the number is variable then the caller
2275 must pop them all. RTD can't be used for library calls now
2276 because the library is compiled with the Unix compiler.
2277 Use of RTD is a selectable option, since it is incompatible with
2278 standard Unix calling sequences. If the option is not selected,
2279 the caller must always pop the args.
2281 The attribute stdcall is equivalent to RTD on a per module basis. */
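/* For example (illustrative), given

     void __attribute__((stdcall)) f (int a, int b);

   this hook returns 8, so f returns with "ret $8" and its callers make
   no stack adjustment of their own.  */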
2284 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2286 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2288 /* Cdecl functions override -mrtd, and never pop the stack. */
2289 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
/* Stdcall and fastcall functions will pop the stack if there are no
   variable args.  */
2293 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2294 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2298 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2299 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2300 == void_type_node)))
2304 /* Lose any fake structure return argument if it is passed on the stack. */
2305 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2307 && !KEEP_AGGREGATE_RETURN_POINTER)
2309 int nregs = ix86_function_regparm (funtype, fundecl);
2312 return GET_MODE_SIZE (Pmode);
2318 /* Argument support functions. */
/* Return true when the register may be used to pass function parameters.  */
2322 ix86_function_arg_regno_p (int regno)
2326 return (regno < REGPARM_MAX
2327 || (TARGET_MMX && MMX_REGNO_P (regno)
2328 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2329 || (TARGET_SSE && SSE_REGNO_P (regno)
2330 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2332 if (TARGET_SSE && SSE_REGNO_P (regno)
2333 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
/* RAX is used as a hidden argument to va_arg functions.  */
2338 for (i = 0; i < REGPARM_MAX; i++)
2339 if (regno == x86_64_int_parameter_registers[i])
/* Return true if we do not know how to pass TYPE solely in registers.  */
2347 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2349 if (must_pass_in_stack_var_size_or_pad (mode, type))
2352 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2353 The layout_type routine is crafty and tries to trick us into passing
2354 currently unsupported vector types on the stack by using TImode. */
2355 return (!TARGET_64BIT && mode == TImode
2356 && type && TREE_CODE (type) != VECTOR_TYPE);
2359 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2360 for a call to a function whose data type is FNTYPE.
2361 For a library call, FNTYPE is 0. */
2364 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2365 tree fntype, /* tree ptr for function decl */
2366 rtx libname, /* SYMBOL_REF of library name or 0 */
2369 static CUMULATIVE_ARGS zero_cum;
2370 tree param, next_param;
2372 if (TARGET_DEBUG_ARG)
2374 fprintf (stderr, "\ninit_cumulative_args (");
2376 fprintf (stderr, "fntype code = %s, ret code = %s",
2377 tree_code_name[(int) TREE_CODE (fntype)],
2378 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2380 fprintf (stderr, "no fntype");
2383 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2388 /* Set up the number of registers to use for passing arguments. */
2389 cum->nregs = ix86_regparm;
2391 cum->sse_nregs = SSE_REGPARM_MAX;
2393 cum->mmx_nregs = MMX_REGPARM_MAX;
2394 cum->warn_sse = true;
2395 cum->warn_mmx = true;
2396 cum->maybe_vaarg = false;
/* Use the ecx and edx registers if the function has the fastcall
   attribute, else look for regparm information.  */
2400 if (fntype && !TARGET_64BIT)
2402 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2408 cum->nregs = ix86_function_regparm (fntype, fndecl);
2411 /* Set up the number of SSE registers used for passing SFmode
2412 and DFmode arguments. Warn for mismatching ABI. */
2413 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
/* Determine if this function has variable arguments.  This is
   indicated by the last argument being 'void_type_node' if there
   are no variable arguments.  If there are variable arguments, then
   we won't pass anything in registers in 32-bit mode. */
2420 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2422 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2423 param != 0; param = next_param)
2425 next_param = TREE_CHAIN (param);
2426 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2436 cum->float_in_sse = 0;
2438 cum->maybe_vaarg = true;
2442 if ((!fntype && !libname)
2443 || (fntype && !TYPE_ARG_TYPES (fntype)))
2444 cum->maybe_vaarg = true;
2446 if (TARGET_DEBUG_ARG)
2447 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2452 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
2453 But in the case of vector types, it is some vector mode.
When we have only some of our vector ISA extensions enabled, there
are some modes for which vector_mode_supported_p is false.  For these
modes, the generic vector support in GCC will choose some non-vector mode
2458 in order to implement the type. By computing the natural mode, we'll
2459 select the proper ABI location for the operand and not depend on whatever
2460 the middle-end decides to do with these vector types. */
2462 static enum machine_mode
2463 type_natural_mode (tree type)
2465 enum machine_mode mode = TYPE_MODE (type);
2467 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
2469 HOST_WIDE_INT size = int_size_in_bytes (type);
2470 if ((size == 8 || size == 16)
2471 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
2472 && TYPE_VECTOR_SUBPARTS (type) > 1)
2474 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2476 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
2477 mode = MIN_MODE_VECTOR_FLOAT;
2479 mode = MIN_MODE_VECTOR_INT;
2481 /* Get the mode which has this inner mode and number of units. */
2482 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
2483 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
2484 && GET_MODE_INNER (mode) == innermode)
2494 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
2495 this may not agree with the mode that the type system has chosen for the
2496 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
2497 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
2500 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
2505 if (orig_mode != BLKmode)
2506 tmp = gen_rtx_REG (orig_mode, regno);
2509 tmp = gen_rtx_REG (mode, regno);
2510 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
2511 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
/* x86-64 register passing implementation.  See the x86-64 ABI for details.
   The goal of this code is to classify each 8 bytes of the incoming argument
   by register class and assign registers accordingly.  */
2521 /* Return the union class of CLASS1 and CLASS2.
2522 See the x86-64 PS ABI for details. */
2524 static enum x86_64_reg_class
2525 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2527 /* Rule #1: If both classes are equal, this is the resulting class. */
2528 if (class1 == class2)
/* Rule #2: If one of the classes is NO_CLASS, the resulting class is
   the other class.  */
2533 if (class1 == X86_64_NO_CLASS)
2535 if (class2 == X86_64_NO_CLASS)
2538 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2539 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2540 return X86_64_MEMORY_CLASS;
2542 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2543 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2544 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2545 return X86_64_INTEGERSI_CLASS;
2546 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2547 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2548 return X86_64_INTEGER_CLASS;
/* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
   MEMORY is used.  */
2552 if (class1 == X86_64_X87_CLASS
2553 || class1 == X86_64_X87UP_CLASS
2554 || class1 == X86_64_COMPLEX_X87_CLASS
2555 || class2 == X86_64_X87_CLASS
2556 || class2 == X86_64_X87UP_CLASS
2557 || class2 == X86_64_COMPLEX_X87_CLASS)
2558 return X86_64_MEMORY_CLASS;
2560 /* Rule #6: Otherwise class SSE is used. */
2561 return X86_64_SSE_CLASS;
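/* Worked example (illustrative): for

     struct s { int a; int b; double d; };

   field a classifies the first eightbyte as INTEGERSI and field b merges
   in INTEGER (rule #4 yields INTEGER); field d makes the second eightbyte
   SSEDF.  The struct is therefore passed in one integer register and one
   SSE register.  */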
2564 /* Classify the argument of type TYPE and mode MODE.
2565 CLASSES will be filled by the register class used to pass each word
2566 of the operand. The number of words is returned. In case the parameter
2567 should be passed in memory, 0 is returned. As a special case for zero
2568 sized containers, classes[0] will be NO_CLASS and 1 is returned.
BIT_OFFSET is used internally for handling records: it specifies the
offset, in bits modulo 256, of the value within the enclosing record,
to avoid overflow cases.
2573 See the x86-64 PS ABI for details.
2577 classify_argument (enum machine_mode mode, tree type,
2578 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2580 HOST_WIDE_INT bytes =
2581 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2582 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2584 /* Variable sized entities are always passed/returned in memory. */
2588 if (mode != VOIDmode
2589 && targetm.calls.must_pass_in_stack (mode, type))
2592 if (type && AGGREGATE_TYPE_P (type))
2596 enum x86_64_reg_class subclasses[MAX_CLASSES];
2598 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2602 for (i = 0; i < words; i++)
2603 classes[i] = X86_64_NO_CLASS;
/* Zero sized arrays or structures are NO_CLASS.  We return 0 to
   signal the memory class, so handle it as a special case.  */
2609 classes[0] = X86_64_NO_CLASS;
/* Classify each field of the record and merge the classes.  */
2614 switch (TREE_CODE (type))
/* For classes, first merge in the fields of the base classes.  */
2618 if (TYPE_BINFO (type))
2620 tree binfo, base_binfo;
2623 for (binfo = TYPE_BINFO (type), basenum = 0;
2624 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2627 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2628 tree type = BINFO_TYPE (base_binfo);
2630 num = classify_argument (TYPE_MODE (type),
2632 (offset + bit_offset) % 256);
2635 for (i = 0; i < num; i++)
2637 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2639 merge_classes (subclasses[i], classes[i + pos]);
/* And now merge the fields of the structure.  */
2644 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2646 if (TREE_CODE (field) == FIELD_DECL)
2650 /* Bitfields are always classified as integer. Handle them
2651 early, since later code would consider them to be
2652 misaligned integers. */
2653 if (DECL_BIT_FIELD (field))
2655 for (i = int_bit_position (field) / 8 / 8;
2656 i < (int_bit_position (field)
2657 + tree_low_cst (DECL_SIZE (field), 0)
2660 merge_classes (X86_64_INTEGER_CLASS,
2665 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2666 TREE_TYPE (field), subclasses,
2667 (int_bit_position (field)
2668 + bit_offset) % 256);
2671 for (i = 0; i < num; i++)
2674 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2676 merge_classes (subclasses[i], classes[i + pos]);
2684 /* Arrays are handled as small records. */
2687 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2688 TREE_TYPE (type), subclasses, bit_offset);
2692 /* The partial classes are now full classes. */
2693 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2694 subclasses[0] = X86_64_SSE_CLASS;
2695 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2696 subclasses[0] = X86_64_INTEGER_CLASS;
2698 for (i = 0; i < words; i++)
2699 classes[i] = subclasses[i % num];
2704 case QUAL_UNION_TYPE:
/* Unions are similar to RECORD_TYPE but the offset is always 0.  */
2708 /* Unions are not derived. */
2709 gcc_assert (!TYPE_BINFO (type)
2710 || !BINFO_N_BASE_BINFOS (TYPE_BINFO (type)));
2711 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2713 if (TREE_CODE (field) == FIELD_DECL)
2716 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2717 TREE_TYPE (field), subclasses,
2721 for (i = 0; i < num; i++)
2722 classes[i] = merge_classes (subclasses[i], classes[i]);
2731 /* Final merger cleanup. */
2732 for (i = 0; i < words; i++)
/* If one class is MEMORY, everything should be passed in memory.  */
2736 if (classes[i] == X86_64_MEMORY_CLASS)
/* The X86_64_SSEUP_CLASS should always be preceded by
   X86_64_SSE_CLASS.  */
2741 if (classes[i] == X86_64_SSEUP_CLASS
2742 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2743 classes[i] = X86_64_SSE_CLASS;
2745 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2746 if (classes[i] == X86_64_X87UP_CLASS
2747 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2748 classes[i] = X86_64_SSE_CLASS;
/* Compute the alignment needed.  We align all types to their natural
   boundaries, with the exception of XFmode, which is aligned to 64 bits.  */
2755 if (mode != VOIDmode && mode != BLKmode)
2757 int mode_alignment = GET_MODE_BITSIZE (mode);
2760 mode_alignment = 128;
2761 else if (mode == XCmode)
2762 mode_alignment = 256;
2763 if (COMPLEX_MODE_P (mode))
2764 mode_alignment /= 2;
2765 /* Misaligned fields are always returned in memory. */
2766 if (bit_offset % mode_alignment)
/* For V1xx modes, just use the base mode.  */
2771 if (VECTOR_MODE_P (mode)
2772 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2773 mode = GET_MODE_INNER (mode);
2775 /* Classification of atomic types. */
2785 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2786 classes[0] = X86_64_INTEGERSI_CLASS;
2788 classes[0] = X86_64_INTEGER_CLASS;
2792 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2797 if (!(bit_offset % 64))
2798 classes[0] = X86_64_SSESF_CLASS;
2800 classes[0] = X86_64_SSE_CLASS;
2803 classes[0] = X86_64_SSEDF_CLASS;
2806 classes[0] = X86_64_X87_CLASS;
2807 classes[1] = X86_64_X87UP_CLASS;
2810 classes[0] = X86_64_SSE_CLASS;
2811 classes[1] = X86_64_SSEUP_CLASS;
2814 classes[0] = X86_64_SSE_CLASS;
2817 classes[0] = X86_64_SSEDF_CLASS;
2818 classes[1] = X86_64_SSEDF_CLASS;
2821 classes[0] = X86_64_COMPLEX_X87_CLASS;
/* These modes are larger than 16 bytes.  */
2832 classes[0] = X86_64_SSE_CLASS;
2833 classes[1] = X86_64_SSEUP_CLASS;
2839 classes[0] = X86_64_SSE_CLASS;
2845 gcc_assert (VECTOR_MODE_P (mode));
2850 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
2852 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2853 classes[0] = X86_64_INTEGERSI_CLASS;
2855 classes[0] = X86_64_INTEGER_CLASS;
2856 classes[1] = X86_64_INTEGER_CLASS;
2857 return 1 + (bytes > 8);
/* Examine the argument and compute the number of registers required in each
   class.  Return 0 iff the parameter should be passed in memory.  */
2864 examine_argument (enum machine_mode mode, tree type, int in_return,
2865 int *int_nregs, int *sse_nregs)
2867 enum x86_64_reg_class class[MAX_CLASSES];
2868 int n = classify_argument (mode, type, class, 0);
2874 for (n--; n >= 0; n--)
2877 case X86_64_INTEGER_CLASS:
2878 case X86_64_INTEGERSI_CLASS:
2881 case X86_64_SSE_CLASS:
2882 case X86_64_SSESF_CLASS:
2883 case X86_64_SSEDF_CLASS:
2886 case X86_64_NO_CLASS:
2887 case X86_64_SSEUP_CLASS:
2889 case X86_64_X87_CLASS:
2890 case X86_64_X87UP_CLASS:
2894 case X86_64_COMPLEX_X87_CLASS:
2895 return in_return ? 2 : 0;
2896 case X86_64_MEMORY_CLASS:
/* Construct a container for the argument as used by the GCC interface.  See
   FUNCTION_ARG for the detailed description.  */
2906 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
2907 tree type, int in_return, int nintregs, int nsseregs,
2908 const int *intreg, int sse_regno)
2910 enum machine_mode tmpmode;
2912 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2913 enum x86_64_reg_class class[MAX_CLASSES];
2917 int needed_sseregs, needed_intregs;
2918 rtx exp[MAX_CLASSES];
2921 n = classify_argument (mode, type, class, 0);
2922 if (TARGET_DEBUG_ARG)
2925 fprintf (stderr, "Memory class\n");
2928 fprintf (stderr, "Classes:");
2929 for (i = 0; i < n; i++)
2931 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2933 fprintf (stderr, "\n");
2938 if (!examine_argument (mode, type, in_return, &needed_intregs,
2941 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2944 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2945 some less clueful developer tries to use floating-point anyway. */
2946 if (needed_sseregs && !TARGET_SSE)
2948 static bool issued_error;
2951 issued_error = true;
2953 error ("SSE register return with SSE disabled");
2955 error ("SSE register argument with SSE disabled");
/* First construct the simple cases.  Avoid SCmode, since we want to use
   a single register to pass this type.  */
2962 if (n == 1 && mode != SCmode)
2965 case X86_64_INTEGER_CLASS:
2966 case X86_64_INTEGERSI_CLASS:
2967 return gen_rtx_REG (mode, intreg[0]);
2968 case X86_64_SSE_CLASS:
2969 case X86_64_SSESF_CLASS:
2970 case X86_64_SSEDF_CLASS:
2971 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
2972 case X86_64_X87_CLASS:
2973 case X86_64_COMPLEX_X87_CLASS:
2974 return gen_rtx_REG (mode, FIRST_STACK_REG);
2975 case X86_64_NO_CLASS:
2976 /* Zero sized array, struct or class. */
2981 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2983 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2985 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2986 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2987 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2988 && class[1] == X86_64_INTEGER_CLASS
2989 && (mode == CDImode || mode == TImode || mode == TFmode)
2990 && intreg[0] + 1 == intreg[1])
2991 return gen_rtx_REG (mode, intreg[0]);
2993 /* Otherwise figure out the entries of the PARALLEL. */
2994 for (i = 0; i < n; i++)
2998 case X86_64_NO_CLASS:
3000 case X86_64_INTEGER_CLASS:
3001 case X86_64_INTEGERSI_CLASS:
3002 /* Merge TImodes on aligned occasions here too. */
3003 if (i * 8 + 8 > bytes)
3004 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3005 else if (class[i] == X86_64_INTEGERSI_CLASS)
/* We've requested 24 bytes we don't have a mode for.  Use DImode.  */
3010 if (tmpmode == BLKmode)
3012 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3013 gen_rtx_REG (tmpmode, *intreg),
3017 case X86_64_SSESF_CLASS:
3018 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3019 gen_rtx_REG (SFmode,
3020 SSE_REGNO (sse_regno)),
3024 case X86_64_SSEDF_CLASS:
3025 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3026 gen_rtx_REG (DFmode,
3027 SSE_REGNO (sse_regno)),
3031 case X86_64_SSE_CLASS:
3032 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3036 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3037 gen_rtx_REG (tmpmode,
3038 SSE_REGNO (sse_regno)),
3040 if (tmpmode == TImode)
3049 /* Empty aligned struct, union or class. */
3053 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3054 for (i = 0; i < nexps; i++)
3055 XVECEXP (ret, 0, i) = exp [i];
3059 /* Update the data in CUM to advance over an argument
3060 of mode MODE and data type TYPE.
3061 (TYPE is null for libcalls where that information may not be available.) */
3064 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3065 tree type, int named)
3068 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3069 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3072 mode = type_natural_mode (type);
3074 if (TARGET_DEBUG_ARG)
3075 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3076 "mode=%s, named=%d)\n\n",
3077 words, cum->words, cum->nregs, cum->sse_nregs,
3078 GET_MODE_NAME (mode), named);
3082 int int_nregs, sse_nregs;
3083 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3084 cum->words += words;
3085 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3087 cum->nregs -= int_nregs;
3088 cum->sse_nregs -= sse_nregs;
3089 cum->regno += int_nregs;
3090 cum->sse_regno += sse_nregs;
3093 cum->words += words;
3111 cum->words += words;
3112 cum->nregs -= words;
3113 cum->regno += words;
3115 if (cum->nregs <= 0)
3123 if (cum->float_in_sse < 2)
3126 if (cum->float_in_sse < 1)
3137 if (!type || !AGGREGATE_TYPE_P (type))
3139 cum->sse_words += words;
3140 cum->sse_nregs -= 1;
3141 cum->sse_regno += 1;
3142 if (cum->sse_nregs <= 0)
3154 if (!type || !AGGREGATE_TYPE_P (type))
3156 cum->mmx_words += words;
3157 cum->mmx_nregs -= 1;
3158 cum->mmx_regno += 1;
3159 if (cum->mmx_nregs <= 0)
3170 /* Define where to put the arguments to a function.
3171 Value is zero to push the argument on the stack,
3172 or a hard register in which to store the argument.
3174 MODE is the argument's machine mode.
3175 TYPE is the data type of the argument (as a tree).
This is null for libcalls where that information may not be available.
3178 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3179 the preceding args and about the function being called.
3180 NAMED is nonzero if this argument is a named parameter
3181 (otherwise it is an extra parameter matching an ellipsis). */
3184 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3185 tree type, int named)
3187 enum machine_mode mode = orig_mode;
3190 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3191 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3192 static bool warnedsse, warnedmmx;
3194 /* To simplify the code below, represent vector types with a vector mode
3195 even if MMX/SSE are not active. */
3196 if (type && TREE_CODE (type) == VECTOR_TYPE)
3197 mode = type_natural_mode (type);
/* Handle a hidden AL argument containing the number of registers for varargs
   x86-64 functions.  For the i386 ABI just return constm1_rtx to avoid
   any AL settings.  */
3202 if (mode == VOIDmode)
3205 return GEN_INT (cum->maybe_vaarg
3206 ? (cum->sse_nregs < 0
3214 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3216 &x86_64_int_parameter_registers [cum->regno],
3221 /* For now, pass fp/complex values on the stack. */
3233 if (words <= cum->nregs)
3235 int regno = cum->regno;
3237 /* Fastcall allocates the first two DWORD (SImode) or
3238 smaller arguments to ECX and EDX. */
3241 if (mode == BLKmode || mode == DImode)
/* ECX, not EAX, is the first allocated register.  */
3248 ret = gen_rtx_REG (mode, regno);
3252 if (cum->float_in_sse < 2)
3255 if (cum->float_in_sse < 1)
3265 if (!type || !AGGREGATE_TYPE_P (type))
3267 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3270 warning (0, "SSE vector argument without SSE enabled "
3274 ret = gen_reg_or_parallel (mode, orig_mode,
3275 cum->sse_regno + FIRST_SSE_REG);
3282 if (!type || !AGGREGATE_TYPE_P (type))
3284 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3287 warning (0, "MMX vector argument without MMX enabled "
3291 ret = gen_reg_or_parallel (mode, orig_mode,
3292 cum->mmx_regno + FIRST_MMX_REG);
3297 if (TARGET_DEBUG_ARG)
3300 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3301 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3304 print_simple_rtl (stderr, ret);
3306 fprintf (stderr, ", stack");
3308 fprintf (stderr, " )\n");
3314 /* A C expression that indicates when an argument must be passed by
3315 reference. If nonzero for an argument, a copy of that argument is
3316 made in memory and a pointer to the argument is passed instead of
3317 the argument itself. The pointer is passed in whatever way is
3318 appropriate for passing a pointer to that type. */
3321 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3322 enum machine_mode mode ATTRIBUTE_UNUSED,
3323 tree type, bool named ATTRIBUTE_UNUSED)
3328 if (type && int_size_in_bytes (type) == -1)
3330 if (TARGET_DEBUG_ARG)
3331 fprintf (stderr, "function_arg_pass_by_reference\n");
/* Return true when TYPE should be 128-bit aligned for the 32-bit argument
   passing ABI.  Only called if TARGET_SSE.  */
3341 contains_128bit_aligned_vector_p (tree type)
3343 enum machine_mode mode = TYPE_MODE (type);
3344 if (SSE_REG_MODE_P (mode)
3345 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3347 if (TYPE_ALIGN (type) < 128)
3350 if (AGGREGATE_TYPE_P (type))
3352 /* Walk the aggregates recursively. */
3353 switch (TREE_CODE (type))
3357 case QUAL_UNION_TYPE:
3361 if (TYPE_BINFO (type))
3363 tree binfo, base_binfo;
3366 for (binfo = TYPE_BINFO (type), i = 0;
3367 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
3368 if (contains_128bit_aligned_vector_p
3369 (BINFO_TYPE (base_binfo)))
/* And now merge the fields of the structure.  */
3373 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3375 if (TREE_CODE (field) == FIELD_DECL
3376 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
/* Just for use if some languages pass arrays by value.  */
3384 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3395 /* Gives the alignment boundary, in bits, of an argument with the
3396 specified mode and type. */
3399 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3403 align = TYPE_ALIGN (type);
3405 align = GET_MODE_ALIGNMENT (mode);
3406 if (align < PARM_BOUNDARY)
3407 align = PARM_BOUNDARY;
/* The i386 ABI defines all arguments to be 4-byte aligned.  We have to
   make an exception for SSE modes since these require 128-bit
   alignment.
3414 The handling here differs from field_alignment. ICC aligns MMX
3415 arguments to 4 byte boundaries, while structure fields are aligned
3416 to 8 byte boundaries. */
3418 align = PARM_BOUNDARY;
3421 if (!SSE_REG_MODE_P (mode))
3422 align = PARM_BOUNDARY;
3426 if (!contains_128bit_aligned_vector_p (type))
3427 align = PARM_BOUNDARY;
3435 /* Return true if N is a possible register number of function value. */
3437 ix86_function_value_regno_p (int regno)
3440 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
3441 || (regno == FIRST_SSE_REG && TARGET_SSE))
3445 && (regno == FIRST_MMX_REG && TARGET_MMX))
3451 /* Define how to find the value returned by a function.
3452 VALTYPE is the data type of the value (as a tree).
3453 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3454 otherwise, FUNC is 0. */
3456 ix86_function_value (tree valtype, tree fntype_or_decl,
3457 bool outgoing ATTRIBUTE_UNUSED)
3459 enum machine_mode natmode = type_natural_mode (valtype);
3463 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
3464 1, REGPARM_MAX, SSE_REGPARM_MAX,
3465 x86_64_int_return_registers, 0);
/* For zero sized structures, construct_container returns NULL, but we
   need to keep the rest of the compiler happy by returning a meaningful value.  */
3469 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
3474 tree fn = NULL_TREE, fntype;
3476 && DECL_P (fntype_or_decl))
3477 fn = fntype_or_decl;
3478 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
3479 return gen_rtx_REG (TYPE_MODE (valtype),
3480 ix86_value_regno (natmode, fn, fntype));
/* Return nonzero iff TYPE is returned in memory.  */
3486 ix86_return_in_memory (tree type)
3488 int needed_intregs, needed_sseregs, size;
3489 enum machine_mode mode = type_natural_mode (type);
3492 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
3494 if (mode == BLKmode)
3497 size = int_size_in_bytes (type);
3499 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
3502 if (VECTOR_MODE_P (mode) || mode == TImode)
3504 /* User-created vectors small enough to fit in EAX. */
/* MMX/3dNow values are returned in MM0,
   except when it doesn't exist.  */
3511 return (TARGET_MMX ? 0 : 1);
3513 /* SSE values are returned in XMM0, except when it doesn't exist. */
3515 return (TARGET_SSE ? 0 : 1);
3526 /* When returning SSE vector types, we have a choice of either
(1) being ABI incompatible with a -march switch, or
3528 (2) generating an error.
3529 Given no good solution, I think the safest thing is one warning.
3530 The user won't be able to use -Werror, but....
3532 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3533 called in response to actually generating a caller or callee that
3534 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3535 via aggregate_value_p for general type probing from tree-ssa. */
3538 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3540 static bool warnedsse, warnedmmx;
3544 /* Look at the return type of the function, not the function type. */
3545 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3547 if (!TARGET_SSE && !warnedsse)
3550 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3553 warning (0, "SSE vector return without SSE enabled "
3558 if (!TARGET_MMX && !warnedmmx)
3560 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
3563 warning (0, "MMX vector return without MMX enabled "
3572 /* Define how to find the value returned by a library function
3573 assuming the value has mode MODE. */
3575 ix86_libcall_value (enum machine_mode mode)
3586 return gen_rtx_REG (mode, FIRST_SSE_REG);
3589 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3593 return gen_rtx_REG (mode, 0);
3597 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
3600 /* Given a mode, return the register to use for a return value. */
3603 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
3605 gcc_assert (!TARGET_64BIT);
3607 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
3608 we prevent this case when mmx is not available. */
3609 if ((VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8))
3610 return FIRST_MMX_REG;
3612 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3613 we prevent this case when sse is not available. */
3614 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3615 return FIRST_SSE_REG;
3617 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
3618 if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
3621 /* Floating point return values in %st(0), except for local functions when
3622 SSE math is enabled or for functions with sseregparm attribute. */
3623 if ((func || fntype)
3624 && (mode == SFmode || mode == DFmode))
3626 int sse_level = ix86_function_sseregparm (fntype, func);
3627 if ((sse_level >= 1 && mode == SFmode)
3628 || (sse_level == 2 && mode == DFmode))
3629 return FIRST_SSE_REG;
3632 return FIRST_FLOAT_REG;
3635 /* Create the va_list data type. */
3638 ix86_build_builtin_va_list (void)
3640 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
/* For i386 we use a plain pointer to the argument area.  */
3644 return build_pointer_type (char_type_node);
3646 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3647 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3649 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3650 unsigned_type_node);
3651 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3652 unsigned_type_node);
3653 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3655 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3658 va_list_gpr_counter_field = f_gpr;
3659 va_list_fpr_counter_field = f_fpr;
3661 DECL_FIELD_CONTEXT (f_gpr) = record;
3662 DECL_FIELD_CONTEXT (f_fpr) = record;
3663 DECL_FIELD_CONTEXT (f_ovf) = record;
3664 DECL_FIELD_CONTEXT (f_sav) = record;
3666 TREE_CHAIN (record) = type_decl;
3667 TYPE_NAME (record) = type_decl;
3668 TYPE_FIELDS (record) = f_gpr;
3669 TREE_CHAIN (f_gpr) = f_fpr;
3670 TREE_CHAIN (f_fpr) = f_ovf;
3671 TREE_CHAIN (f_ovf) = f_sav;
3673 layout_type (record);
3675 /* The correct type is an array type of one element. */
3676 return build_array_type (record, build_index_type (size_zero_node));
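/* The type built above matches the x86-64 ABI's va_list layout, shown
   here for illustration:

     typedef struct {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag, va_list[1];  */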
3679 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3682 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3683 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3686 CUMULATIVE_ARGS next_cum;
3687 rtx save_area = NULL_RTX, mem;
3700 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
/* Indicate to allocate space on the stack for the varargs save area.  */
3704 ix86_save_varrargs_registers = 1;
3706 cfun->stack_alignment_needed = 128;
3708 fntype = TREE_TYPE (current_function_decl);
3709 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3710 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3711 != void_type_node));
3713 /* For varargs, we do not want to skip the dummy va_dcl argument.
3714 For stdargs, we do want to skip the last named argument. */
3717 function_arg_advance (&next_cum, mode, type, 1);
3720 save_area = frame_pointer_rtx;
3722 set = get_varargs_alias_set ();
3724 for (i = next_cum.regno;
3726 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
3729 mem = gen_rtx_MEM (Pmode,
3730 plus_constant (save_area, i * UNITS_PER_WORD));
3731 MEM_NOTRAP_P (mem) = 1;
3732 set_mem_alias_set (mem, set);
3733 emit_move_insn (mem, gen_rtx_REG (Pmode,
3734 x86_64_int_parameter_registers[i]));
3737 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
/* Now emit code to save SSE registers.  The AX parameter contains the
   number of SSE parameter registers used to call this function.  We use
   the sse_prologue_save insn template, which produces a computed jump
   across the SSE saves.  We need some preparation work to get this working.  */
3744 label = gen_label_rtx ();
3745 label_ref = gen_rtx_LABEL_REF (Pmode, label);
/* Compute the address to jump to:
   label - 5*eax + nnamed_sse_arguments*5  */
3749 tmp_reg = gen_reg_rtx (Pmode);
3750 nsse_reg = gen_reg_rtx (Pmode);
3751 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3752 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3753 gen_rtx_MULT (Pmode, nsse_reg,
3755 if (next_cum.sse_regno)
3758 gen_rtx_CONST (DImode,
3759 gen_rtx_PLUS (DImode,
3761 GEN_INT (next_cum.sse_regno * 4))));
3763 emit_move_insn (nsse_reg, label_ref);
3764 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
/* Compute the address of the memory block we save into.  We always use a
   pointer pointing 127 bytes after the first byte to store - this is
   needed to keep the instruction size limited to 4 bytes.  */
3769 tmp_reg = gen_reg_rtx (Pmode);
3770 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3771 plus_constant (save_area,
3772 8 * REGPARM_MAX + 127)));
3773 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3774 MEM_NOTRAP_P (mem) = 1;
3775 set_mem_alias_set (mem, set);
3776 set_mem_align (mem, BITS_PER_WORD);
3778 /* And finally do the dirty job! */
3779 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3780 GEN_INT (next_cum.sse_regno), label));
3785 /* Implement va_start. */
3788 ix86_va_start (tree valist, rtx nextarg)
3790 HOST_WIDE_INT words, n_gpr, n_fpr;
3791 tree f_gpr, f_fpr, f_ovf, f_sav;
3792 tree gpr, fpr, ovf, sav, t;
/* Only the 64-bit target needs something special.  */
3797 std_expand_builtin_va_start (valist, nextarg);
3801 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3802 f_fpr = TREE_CHAIN (f_gpr);
3803 f_ovf = TREE_CHAIN (f_fpr);
3804 f_sav = TREE_CHAIN (f_ovf);
3806 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3807 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3808 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3809 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3810 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
/* Count the number of gp and fp argument registers used.  */
3813 words = current_function_args_info.words;
3814 n_gpr = current_function_args_info.regno;
3815 n_fpr = current_function_args_info.sse_regno;
3817 if (TARGET_DEBUG_ARG)
3818 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3819 (int) words, (int) n_gpr, (int) n_fpr);
3821 if (cfun->va_list_gpr_size)
3823 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3824 build_int_cst (NULL_TREE, n_gpr * 8));
3825 TREE_SIDE_EFFECTS (t) = 1;
3826 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3829 if (cfun->va_list_fpr_size)
3831 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3832 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3833 TREE_SIDE_EFFECTS (t) = 1;
3834 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3837 /* Find the overflow area. */
3838 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3840 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), t,
3841 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3842 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3843 TREE_SIDE_EFFECTS (t) = 1;
3844 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3846 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
/* Find the register save area.
   The prologue of the function saves it right above the stack frame.  */
3850 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3851 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3852 TREE_SIDE_EFFECTS (t) = 1;
3853 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
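/* Worked example (illustrative): for

     void f (int a, double x, ...);

   the named arguments use one integer and one SSE register, so va_start
   stores gp_offset = 1*8 and fp_offset = 8*REGPARM_MAX + 1*16, points
   overflow_arg_area at the first stack argument, and sets reg_save_area
   to the block saved by the prologue.  */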
3857 /* Implement va_arg. */
3860 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3862 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3863 tree f_gpr, f_fpr, f_ovf, f_sav;
3864 tree gpr, fpr, ovf, sav, t;
3866 tree lab_false, lab_over = NULL_TREE;
3871 enum machine_mode nat_mode;
/* Only the 64-bit target needs something special.  */
3875 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3877 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3878 f_fpr = TREE_CHAIN (f_gpr);
3879 f_ovf = TREE_CHAIN (f_fpr);
3880 f_sav = TREE_CHAIN (f_ovf);
3882 valist = build_va_arg_indirect_ref (valist);
3883 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3884 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3885 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3886 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3888 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3890 type = build_pointer_type (type);
3891 size = int_size_in_bytes (type);
3892 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3894 nat_mode = type_natural_mode (type);
3895 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
3896 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3898 /* Pull the value out of the saved registers. */
3900 addr = create_tmp_var (ptr_type_node, "addr");
3901 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3905 int needed_intregs, needed_sseregs;
3907 tree int_addr, sse_addr;
3909 lab_false = create_artificial_label ();
3910 lab_over = create_artificial_label ();
3912 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
3914 need_temp = (!REG_P (container)
3915 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3916 || TYPE_ALIGN (type) > 128));
3918 /* In case we are passing a structure, verify that it occupies a
3919 consecutive block of the register save area.  If not, we need to do moves. */
3920 if (!need_temp && !REG_P (container))
3922 /* Verify that all registers are strictly consecutive.  */
3923 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3927 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3929 rtx slot = XVECEXP (container, 0, i);
3930 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3931 || INTVAL (XEXP (slot, 1)) != i * 16)
3939 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3941 rtx slot = XVECEXP (container, 0, i);
3942 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3943 || INTVAL (XEXP (slot, 1)) != i * 8)
3955 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3956 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3957 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3958 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3961 /* First ensure that we fit completely in registers. */
3964 t = build_int_cst (TREE_TYPE (gpr),
3965 (REGPARM_MAX - needed_intregs + 1) * 8);
3966 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3967 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3968 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3969 gimplify_and_add (t, pre_p);
3973 t = build_int_cst (TREE_TYPE (fpr),
3974 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3976 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3977 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3978 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3979 gimplify_and_add (t, pre_p);
3982 /* Compute index to start of area used for integer regs. */
3985 /* int_addr = gpr + sav; */
3986 t = fold_convert (ptr_type_node, gpr);
3987 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3988 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3989 gimplify_and_add (t, pre_p);
3993 /* sse_addr = fpr + sav; */
3994 t = fold_convert (ptr_type_node, fpr);
3995 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3996 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3997 gimplify_and_add (t, pre_p);
4002 tree temp = create_tmp_var (type, "va_arg_tmp");
4005 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4006 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
4007 gimplify_and_add (t, pre_p);
4009 for (i = 0; i < XVECLEN (container, 0); i++)
4011 rtx slot = XVECEXP (container, 0, i);
4012 rtx reg = XEXP (slot, 0);
4013 enum machine_mode mode = GET_MODE (reg);
4014 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4015 tree addr_type = build_pointer_type (piece_type);
4018 tree dest_addr, dest;
4020 if (SSE_REGNO_P (REGNO (reg)))
4022 src_addr = sse_addr;
4023 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4027 src_addr = int_addr;
4028 src_offset = REGNO (reg) * 8;
4030 src_addr = fold_convert (addr_type, src_addr);
4031 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4032 size_int (src_offset)));
4033 src = build_va_arg_indirect_ref (src_addr);
4035 dest_addr = fold_convert (addr_type, addr);
4036 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4037 size_int (INTVAL (XEXP (slot, 1)))));
4038 dest = build_va_arg_indirect_ref (dest_addr);
4040 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
4041 gimplify_and_add (t, pre_p);
4047 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4048 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4049 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
4050 gimplify_and_add (t, pre_p);
4054 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4055 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4056 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
4057 gimplify_and_add (t, pre_p);
4060 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4061 gimplify_and_add (t, pre_p);
4063 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4064 append_to_statement_list (t, pre_p);
4067 /* ... otherwise out of the overflow area. */
4069 /* Care for on-stack alignment if needed. */
4070 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
4074 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4075 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4076 build_int_cst (TREE_TYPE (ovf), align - 1));
4077 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4078 build_int_cst (TREE_TYPE (t), -align));
4080 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4082 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
4083 gimplify_and_add (t2, pre_p);
4085 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4086 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4087 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
4088 gimplify_and_add (t, pre_p);
4092 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4093 append_to_statement_list (t, pre_p);
4096 ptrtype = build_pointer_type (type);
4097 addr = fold_convert (ptrtype, addr);
4100 addr = build_va_arg_indirect_ref (addr);
4101 return build_va_arg_indirect_ref (addr);
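/* In C-like pseudo-code, the GIMPLE built above for a small integer
   argument amounts to the standard SysV algorithm (an illustrative
   sketch; the names below are not from this file):

       if (ap->gp_offset >= (REGPARM_MAX - needed_intregs + 1) * 8)
         goto overflow;
       addr = ap->reg_save_area + ap->gp_offset;
       ap->gp_offset += needed_intregs * 8;
       goto done;
     overflow:
       addr = ap->overflow_arg_area;          aligned first if needed
       ap->overflow_arg_area += rsize * UNITS_PER_WORD;
     done:
       result = *(type *) addr;

   Aggregates split between integer and SSE registers additionally go
   through the temporary-and-copy loop above.  */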
4104 /* Return nonzero if OPNUM's MEM should be matched
4105 in movabs* patterns. */
4108 ix86_check_movabs (rtx insn, int opnum)
4112 set = PATTERN (insn);
4113 if (GET_CODE (set) == PARALLEL)
4114 set = XVECEXP (set, 0, 0);
4115 gcc_assert (GET_CODE (set) == SET);
4116 mem = XEXP (set, opnum);
4117 while (GET_CODE (mem) == SUBREG)
4118 mem = SUBREG_REG (mem);
4119 gcc_assert (GET_CODE (mem) == MEM);
4120 return (volatile_ok || !MEM_VOLATILE_P (mem));
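/* Background: the movabs* patterns are the rex64 moves between %rax
   and a 64-bit absolute address, e.g. (illustrative AT&T syntax):

       movabs 0x1122334455667788, %rax
       movabs %rax, 0x1122334455667788

   The check above mirrors general_operand's treatment of volatile
   memory: the operand is accepted only if volatile_ok is set.  */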
4123 /* Initialize the table of extra 80387 mathematical constants. */
4126 init_ext_80387_constants (void)
4128 static const char * cst[5] =
4130 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4131 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4132 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4133 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4134 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4138 for (i = 0; i < 5; i++)
4140 real_from_string (&ext_80387_constants_table[i], cst[i]);
4141 /* Ensure each constant is rounded to XFmode precision. */
4142 real_convert (&ext_80387_constants_table[i],
4143 XFmode, &ext_80387_constants_table[i]);
4146 ext_80387_constants_init = 1;
4149 /* Return true if the constant is something that can be loaded with
4150 a special instruction. */
4153 standard_80387_constant_p (rtx x)
4155 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4158 if (x == CONST0_RTX (GET_MODE (x)))
4160 if (x == CONST1_RTX (GET_MODE (x)))
4163 /* For XFmode constants, try to find a special 80387 instruction when
4164 optimizing for size or on those CPUs that benefit from them. */
4165 if (GET_MODE (x) == XFmode
4166 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4171 if (! ext_80387_constants_init)
4172 init_ext_80387_constants ();
4174 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4175 for (i = 0; i < 5; i++)
4176 if (real_identical (&r, &ext_80387_constants_table[i]))
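/* The return value convention assumed by the callers below is
   (a sketch; some of the return statements are elided in this copy):

       0        not a special constant
       1        0.0, loadable with fldz
       2        1.0, loadable with fld1
       i + 3    ext_80387_constants_table[i], loadable with
                fldlg2, fldln2, fldl2e, fldl2t or fldpi.  */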
4183 /* Return the opcode of the special instruction to be used to load the constant X.  */
4187 standard_80387_constant_opcode (rtx x)
4189 switch (standard_80387_constant_p (x))
4210 /* Return the CONST_DOUBLE representing the 80387 constant that is
4211 loaded by the specified special instruction. The argument IDX
4212 matches the return value from standard_80387_constant_p. */
4215 standard_80387_constant_rtx (int idx)
4219 if (! ext_80387_constants_init)
4220 init_ext_80387_constants ();
4236 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4240 /* Return 1 if X is an FP constant we can load into an SSE register without using memory.  */
4243 standard_sse_constant_p (rtx x)
4245 if (x == const0_rtx)
4247 return (x == CONST0_RTX (GET_MODE (x)));
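/* Only the all-zeros constant qualifies: it can be synthesized in a
   register without touching memory, e.g. (illustrative):

       xorps %xmm0, %xmm0        float zero
       pxor  %xmm0, %xmm0        integer zero

   Every other SSE constant has to be loaded from the constant pool.  */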
4250 /* Return 1 if OP contains a symbol reference.  */
4253 symbolic_reference_mentioned_p (rtx op)
4258 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4261 fmt = GET_RTX_FORMAT (GET_CODE (op));
4262 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4268 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4269 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4273 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4280 /* Return 1 if it is appropriate to emit `ret' instructions in the
4281 body of a function. Do this only if the epilogue is simple, needing a
4282 couple of insns. Prior to reloading, we can't tell how many registers
4283 must be saved, so return 0 then. Return 0 if there is no frame
4284 marker to de-allocate. */
4287 ix86_can_use_return_insn_p (void)
4289 struct ix86_frame frame;
4291 if (! reload_completed || frame_pointer_needed)
4294 /* Don't allow more than 32768 bytes to be popped, since that's all we
4295 can do with one instruction.  */
4296 if (current_function_pops_args
4297 && current_function_args_size >= 32768)
4300 ix86_compute_frame_layout (&frame);
4301 return frame.to_allocate == 0 && frame.nregs == 0;
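/* The bound above stems from the "ret imm16" encoding, e.g.
   (illustrative):

       ret $28        return and pop 28 bytes of arguments

   The immediate is 16 bits, so at most 65535 bytes could be popped;
   staying below 32768 is a conservative choice (the exact rationale
   for halving the range is an assumption, not stated here).  */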
4304 /* Value should be nonzero if functions must have frame pointers.
4305 Zero means the frame pointer need not be set up (and parms may
4306 be accessed via the stack pointer) in functions that seem suitable. */
4309 ix86_frame_pointer_required (void)
4311 /* If we accessed previous frames, then the generated code expects
4312 to be able to access the saved ebp value in our frame. */
4313 if (cfun->machine->accesses_prev_frame)
4316 /* Several x86 OSes need a frame pointer for other reasons,
4317 usually pertaining to setjmp. */
4318 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4321 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4322 the frame pointer by default. Turn it back on now if we've not
4323 got a leaf function. */
4324 if (TARGET_OMIT_LEAF_FRAME_POINTER
4325 && (!current_function_is_leaf))
4328 if (current_function_profile)
4334 /* Record that the current function accesses previous call frames. */
4337 ix86_setup_frame_addresses (void)
4339 cfun->machine->accesses_prev_frame = 1;
4342 #if defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)
4343 # define USE_HIDDEN_LINKONCE 1
4345 # define USE_HIDDEN_LINKONCE 0
4348 static int pic_labels_used;
4350 /* Fills in the label name that should be used for a pc thunk for
4351 the given register. */
4354 get_pc_thunk_name (char name[32], unsigned int regno)
4356 if (USE_HIDDEN_LINKONCE)
4357 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
4359 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
4363 /* This function emits the pc thunks for -fpic: each thunk loads its
4364 register with the return address of the caller and then returns. */
4367 ix86_file_end (void)
4372 for (regno = 0; regno < 8; ++regno)
4376 if (! ((pic_labels_used >> regno) & 1))
4379 get_pc_thunk_name (name, regno);
4381 if (USE_HIDDEN_LINKONCE)
4385 decl = build_decl (FUNCTION_DECL, get_identifier (name),
4387 TREE_PUBLIC (decl) = 1;
4388 TREE_STATIC (decl) = 1;
4389 DECL_ONE_ONLY (decl) = 1;
4391 (*targetm.asm_out.unique_section) (decl, 0);
4392 named_section (decl, NULL, 0);
4394 (*targetm.asm_out.globalize_label) (asm_out_file, name);
4395 fputs ("\t.hidden\t", asm_out_file);
4396 assemble_name (asm_out_file, name);
4397 fputc ('\n', asm_out_file);
4398 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
4403 ASM_OUTPUT_LABEL (asm_out_file, name);
4406 xops[0] = gen_rtx_REG (SImode, regno);
4407 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
4408 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
4409 output_asm_insn ("ret", xops);
4412 if (NEED_INDICATE_EXEC_STACK)
4413 file_end_indicate_exec_stack ();
4416 /* Emit code for the SET_GOT patterns. */
4419 output_set_got (rtx dest)
4424 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
4426 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
4428 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
4431 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
4433 output_asm_insn ("call\t%a2", xops);
4436 /* Output the "canonical" label name ("Lxx$pb") here too. This
4437 is what will be referred to by the Mach-O PIC subsystem. */
4438 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
4440 (*targetm.asm_out.internal_label) (asm_out_file, "L",
4441 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
4444 output_asm_insn ("pop{l}\t%0", xops);
4449 get_pc_thunk_name (name, REGNO (dest));
4450 pic_labels_used |= 1 << REGNO (dest);
4452 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4453 xops[2] = gen_rtx_MEM (QImode, xops[2]);
4454 output_asm_insn ("call\t%X2", xops);
4457 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
4458 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
4459 else if (!TARGET_MACHO)
4460 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
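/* The two strategies above emit roughly the following (illustrative
   AT&T syntax, for a %ebx destination):

   without deep branch prediction:
       call  1f
     1:
       popl  %ebx
       addl  $_GLOBAL_OFFSET_TABLE_+[.-1b], %ebx

   with deep branch prediction, the call is paired with the thunk's
   ret so the return-stack predictor stays balanced:
       call  __i686.get_pc_thunk.bx
       addl  $_GLOBAL_OFFSET_TABLE_, %ebx

   where the thunk body is the mov/ret pair emitted by ix86_file_end.  */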
4465 /* Generate a "push" pattern for input ARG.  */
4470 return gen_rtx_SET (VOIDmode,
4471 gen_rtx_MEM (Pmode,
4472 gen_rtx_PRE_DEC (Pmode,
4473 stack_pointer_rtx)),
4474 arg);
4477 /* Return >= 0 if there is an unused call-clobbered register available
4478 for the entire function. */
4481 ix86_select_alt_pic_regnum (void)
4483 if (current_function_is_leaf && !current_function_profile)
4486 for (i = 2; i >= 0; --i)
4487 if (!regs_ever_live[i])
4491 return INVALID_REGNUM;
4494 /* Return 1 if we need to save REGNO. */
4496 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4498 if (pic_offset_table_rtx
4499 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4500 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4501 || current_function_profile
4502 || current_function_calls_eh_return
4503 || current_function_uses_const_pool))
4505 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4510 if (current_function_calls_eh_return && maybe_eh_return)
4515 unsigned test = EH_RETURN_DATA_REGNO (i);
4516 if (test == INVALID_REGNUM)
4523 if (cfun->machine->force_align_arg_pointer
4524 && regno == REGNO (cfun->machine->force_align_arg_pointer))
4527 return (regs_ever_live[regno]
4528 && !call_used_regs[regno]
4529 && !fixed_regs[regno]
4530 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4533 /* Return the number of registers to be saved on the stack.  */
4536 ix86_nsaved_regs (void)
4541 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4542 if (ix86_save_reg (regno, true))
4547 /* Return the offset between two registers, one to be eliminated, and the other
4548 its replacement, at the start of a routine. */
4551 ix86_initial_elimination_offset (int from, int to)
4553 struct ix86_frame frame;
4554 ix86_compute_frame_layout (&frame);
4556 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4557 return frame.hard_frame_pointer_offset;
4558 else if (from == FRAME_POINTER_REGNUM
4559 && to == HARD_FRAME_POINTER_REGNUM)
4560 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4563 gcc_assert (to == STACK_POINTER_REGNUM);
4565 if (from == ARG_POINTER_REGNUM)
4566 return frame.stack_pointer_offset;
4568 gcc_assert (from == FRAME_POINTER_REGNUM);
4569 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4573 /* Fill the ix86_frame structure describing the frame of the currently compiled function.  */
4576 ix86_compute_frame_layout (struct ix86_frame *frame)
4578 HOST_WIDE_INT total_size;
4579 unsigned int stack_alignment_needed;
4580 HOST_WIDE_INT offset;
4581 unsigned int preferred_alignment;
4582 HOST_WIDE_INT size = get_frame_size ();
4584 frame->nregs = ix86_nsaved_regs ();
4587 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4588 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4590 /* During the reload iteration the number of saved registers can change.
4591 Recompute the value as needed.  Do not recompute when the number of registers
4592 didn't change, as reload makes multiple calls to this function and does not
4593 expect the decision to change within a single iteration. */
4595 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4597 int count = frame->nregs;
4599 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4600 /* The fast prologue uses moves instead of pushes to save registers.  This
4601 is significantly longer, but also executes faster, as modern hardware
4602 can execute the moves in parallel but can't do that for push/pop.
4604 Be careful about choosing which prologue to emit: when the function takes
4605 many instructions to execute, we may use the slow version, as well as when
4606 the function is known to be outside a hot spot (this is known with
4607 feedback only).  Weight the size of the function by the number of registers
4608 to save, as it is cheap to use one or two push instructions but very
4609 slow to use many of them. */
4611 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4612 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4613 || (flag_branch_probabilities
4614 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4615 cfun->machine->use_fast_prologue_epilogue = false;
4617 cfun->machine->use_fast_prologue_epilogue
4618 = !expensive_function_p (count);
4620 if (TARGET_PROLOGUE_USING_MOVE
4621 && cfun->machine->use_fast_prologue_epilogue)
4622 frame->save_regs_using_mov = true;
4624 frame->save_regs_using_mov = false;
4627 /* Skip return address and saved base pointer. */
4628 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4630 frame->hard_frame_pointer_offset = offset;
4632 /* Do some sanity checking of stack_alignment_needed and
4633 preferred_alignment, since the i386 port is the only one using these
4634 features, and they may break easily. */
4636 gcc_assert (!size || stack_alignment_needed);
4637 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
4638 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4639 gcc_assert (stack_alignment_needed
4640 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4642 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4643 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4645 /* Register save area.  */
4646 offset += frame->nregs * UNITS_PER_WORD;
4649 if (ix86_save_varrargs_registers)
4651 offset += X86_64_VARARGS_SIZE;
4652 frame->va_arg_size = X86_64_VARARGS_SIZE;
4655 frame->va_arg_size = 0;
4657 /* Align start of frame for local function. */
4658 frame->padding1 = ((offset + stack_alignment_needed - 1)
4659 & -stack_alignment_needed) - offset;
4661 offset += frame->padding1;
4663 /* Frame pointer points here. */
4664 frame->frame_pointer_offset = offset;
4668 /* Add the outgoing arguments area.  It can be skipped if we eliminated
4669 all the function calls as dead code.
4670 Skipping is, however, impossible when the function calls alloca: the
4671 alloca expander assumes that the last current_function_outgoing_args_size
4672 bytes of the stack frame are unused. */
4673 if (ACCUMULATE_OUTGOING_ARGS
4674 && (!current_function_is_leaf || current_function_calls_alloca))
4676 offset += current_function_outgoing_args_size;
4677 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4680 frame->outgoing_arguments_size = 0;
4682 /* Align stack boundary.  Only needed if we're calling another function or using alloca.  */
4684 if (!current_function_is_leaf || current_function_calls_alloca)
4685 frame->padding2 = ((offset + preferred_alignment - 1)
4686 & -preferred_alignment) - offset;
4688 frame->padding2 = 0;
4690 offset += frame->padding2;
4692 /* We've reached the end of the stack frame.  */
4693 frame->stack_pointer_offset = offset;
4695 /* Size the prologue needs to allocate.  */
4696 frame->to_allocate =
4697 (size + frame->padding1 + frame->padding2
4698 + frame->outgoing_arguments_size + frame->va_arg_size);
4700 if ((!frame->to_allocate && frame->nregs <= 1)
4701 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4702 frame->save_regs_using_mov = false;
4704 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4705 && current_function_is_leaf)
4707 frame->red_zone_size = frame->to_allocate;
4708 if (frame->save_regs_using_mov)
4709 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4710 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4711 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4714 frame->red_zone_size = 0;
4715 frame->to_allocate -= frame->red_zone_size;
4716 frame->stack_pointer_offset -= frame->red_zone_size;
4718 fprintf (stderr, "nregs: %i\n", frame->nregs);
4719 fprintf (stderr, "size: %i\n", size);
4720 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4721 fprintf (stderr, "padding1: %i\n", frame->padding1);
4722 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4723 fprintf (stderr, "padding2: %i\n", frame->padding2);
4724 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4725 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4726 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4727 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4728 frame->hard_frame_pointer_offset);
4729 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
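/* The resulting frame layout, from higher to lower addresses
   (a sketch of the offsets assigned above):

       incoming arguments
       return address                  <- ARG_POINTER
       saved frame pointer             if frame_pointer_needed
                                       <- HARD_FRAME_POINTER
       saved registers                 nregs * UNITS_PER_WORD
       varargs register save area      va_arg_size (64-bit only)
       padding1                        align to stack_alignment_needed
                                       <- FRAME_POINTER
       local variables                 get_frame_size ()
       outgoing arguments              if ACCUMULATE_OUTGOING_ARGS
       padding2                        align to preferred_alignment
                                       <- STACK_POINTER
       red zone                        below sp; 64-bit leaf functions
                                       may keep data here unallocated  */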
4733 /* Emit code to save registers in the prologue. */
4736 ix86_emit_save_regs (void)
4741 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
4742 if (ix86_save_reg (regno, true))
4744 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4745 RTX_FRAME_RELATED_P (insn) = 1;
4749 /* Emit code to save registers using MOV insns.  The first register
4750 is stored at POINTER + OFFSET. */
4752 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4757 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4758 if (ix86_save_reg (regno, true))
4760 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4762 gen_rtx_REG (Pmode, regno));
4763 RTX_FRAME_RELATED_P (insn) = 1;
4764 offset += UNITS_PER_WORD;
4768 /* Expand prologue or epilogue stack adjustment.
4769 The pattern exists to put a dependency on all ebp-based memory accesses.
4770 STYLE should be negative if instructions should be marked as frame related,
4771 zero if the %r11 register is live and cannot be freely used, and positive otherwise.  */
4775 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4780 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4781 else if (x86_64_immediate_operand (offset, DImode))
4782 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4786 /* r11 is used by indirect sibcall return as well, set before the
4787 epilogue and used after the epilogue.  At the moment an indirect sibcall
4788 shouldn't be used together with huge frame sizes in one
4789 function, because of the frame_size check in sibcall.c. */
4791 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4792 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4794 RTX_FRAME_RELATED_P (insn) = 1;
4795 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4799 RTX_FRAME_RELATED_P (insn) = 1;
4802 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
4805 ix86_internal_arg_pointer (void)
4807 if (FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
4808 && DECL_NAME (current_function_decl)
4809 && MAIN_NAME_P (DECL_NAME (current_function_decl))
4810 && DECL_FILE_SCOPE_P (current_function_decl))
4812 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
4813 return copy_to_reg (cfun->machine->force_align_arg_pointer);
4816 return virtual_incoming_args_rtx;
4819 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
4820 This is called from dwarf2out.c to emit call frame instructions
4821 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
4823 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
4825 rtx unspec = SET_SRC (pattern);
4826 gcc_assert (GET_CODE (unspec) == UNSPEC);
4830 case UNSPEC_REG_SAVE:
4831 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
4832 SET_DEST (pattern));
4834 case UNSPEC_DEF_CFA:
4835 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
4836 INTVAL (XVECEXP (unspec, 0, 0)));
4843 /* Expand the prologue into a bunch of separate insns. */
4846 ix86_expand_prologue (void)
4850 struct ix86_frame frame;
4851 HOST_WIDE_INT allocate;
4853 ix86_compute_frame_layout (&frame);
4855 if (cfun->machine->force_align_arg_pointer)
4859 /* Grab the argument pointer. */
4860 x = plus_constant (stack_pointer_rtx, 4);
4861 y = cfun->machine->force_align_arg_pointer;
4862 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
4863 RTX_FRAME_RELATED_P (insn) = 1;
4865 /* The unwind info consists of two parts: install the fafp as the cfa,
4866 and record the fafp as the "save register" of the stack pointer.
4867 The latter is there so that the unwinder can see where it
4868 should restore the stack pointer across the and insn. */
4869 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
4870 x = gen_rtx_SET (VOIDmode, y, x);
4871 RTX_FRAME_RELATED_P (x) = 1;
4872 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
4874 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
4875 RTX_FRAME_RELATED_P (y) = 1;
4876 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
4877 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
4878 REG_NOTES (insn) = x;
4880 /* Align the stack. */
4881 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
4884 /* And here we cheat like madmen with the unwind info. We force the
4885 cfa register back to sp+4, which is exactly what it was at the
4886 start of the function. Re-pushing the return address results in
4887 the return at the same spot relative to the cfa, and thus is
4888 correct wrt the unwind info. */
4889 x = cfun->machine->force_align_arg_pointer;
4890 x = gen_frame_mem (Pmode, plus_constant (x, -4));
4891 insn = emit_insn (gen_push (x));
4892 RTX_FRAME_RELATED_P (insn) = 1;
4895 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
4896 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
4897 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
4898 REG_NOTES (insn) = x;
4901 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4902 slower on all targets. Also sdb doesn't like it. */
4904 if (frame_pointer_needed)
4906 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4907 RTX_FRAME_RELATED_P (insn) = 1;
4909 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4910 RTX_FRAME_RELATED_P (insn) = 1;
4913 allocate = frame.to_allocate;
4915 if (!frame.save_regs_using_mov)
4916 ix86_emit_save_regs ();
4918 allocate += frame.nregs * UNITS_PER_WORD;
4920 /* When using the red zone we may start register saving before allocating
4921 the stack frame, saving one cycle of the prologue. */
4922 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4923 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4924 : stack_pointer_rtx,
4925 -frame.nregs * UNITS_PER_WORD);
4929 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4930 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4931 GEN_INT (-allocate), -1);
4934 /* Only valid for Win32. */
4935 rtx eax = gen_rtx_REG (SImode, 0);
4936 bool eax_live = ix86_eax_live_at_start_p ();
4939 gcc_assert (!TARGET_64BIT);
4943 emit_insn (gen_push (eax));
4947 emit_move_insn (eax, GEN_INT (allocate));
4949 insn = emit_insn (gen_allocate_stack_worker (eax));
4950 RTX_FRAME_RELATED_P (insn) = 1;
4951 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
4952 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
4953 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4954 t, REG_NOTES (insn));
4958 if (frame_pointer_needed)
4959 t = plus_constant (hard_frame_pointer_rtx,
4962 - frame.nregs * UNITS_PER_WORD);
4964 t = plus_constant (stack_pointer_rtx, allocate);
4965 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4969 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4971 if (!frame_pointer_needed || !frame.to_allocate)
4972 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4974 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4975 -frame.nregs * UNITS_PER_WORD);
4978 pic_reg_used = false;
4979 if (pic_offset_table_rtx
4980 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4981 || current_function_profile))
4983 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4985 if (alt_pic_reg_used != INVALID_REGNUM)
4986 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4988 pic_reg_used = true;
4994 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
4996 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
4998 /* Even with accurate pre-reload life analysis, we can wind up
4999 deleting all references to the pic register after reload.
5000 Consider if cross-jumping unifies two sides of a branch
5001 controlled by a comparison vs the only read from a global.
5002 In which case, allow the set_got to be deleted, though we're
5003 too late to do anything about the ebx save in the prologue. */
5004 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5007 /* Prevent function calls from being scheduled before the call to mcount.
5008 In the pic_reg_used case, make sure that the got load isn't deleted. */
5009 if (current_function_profile)
5010 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
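/* Putting it together, the classic push-based 32-bit prologue emitted
   here looks like (an illustrative sketch: frame pointer needed, two
   saved registers, N bytes to allocate):

       pushl %ebp
       movl  %esp, %ebp
       pushl %esi
       pushl %edi
       subl  $N, %esp

   while the mov-based fast path allocates once and then stores the
   registers with plain moves, which modern cores can execute in
   parallel:

       pushl %ebp
       movl  %esp, %ebp
       subl  $(N+8), %esp
       movl  %esi, -4(%ebp)
       movl  %edi, -8(%ebp)

   with the exact offsets depending on the frame layout computed by
   ix86_compute_frame_layout.  */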
5013 /* Emit code to restore saved registers using MOV insns. First register
5014 is restored from POINTER + OFFSET. */
5016 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5017 int maybe_eh_return)
5020 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5022 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5023 if (ix86_save_reg (regno, maybe_eh_return))
5025 /* Ensure that adjust_address won't be forced to produce a pointer
5026 out of the range allowed by the x86-64 instruction set. */
5027 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5031 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
5032 emit_move_insn (r11, GEN_INT (offset));
5033 emit_insn (gen_adddi3 (r11, r11, pointer));
5034 base_address = gen_rtx_MEM (Pmode, r11);
5037 emit_move_insn (gen_rtx_REG (Pmode, regno),
5038 adjust_address (base_address, Pmode, offset));
5039 offset += UNITS_PER_WORD;
5043 /* Restore function stack, frame, and registers. */
5046 ix86_expand_epilogue (int style)
5049 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5050 struct ix86_frame frame;
5051 HOST_WIDE_INT offset;
5053 ix86_compute_frame_layout (&frame);
5055 /* Calculate start of saved registers relative to ebp. Special care
5056 must be taken for the normal return case of a function using
5057 eh_return: the eax and edx registers are marked as saved, but not
5058 restored along this path. */
5059 offset = frame.nregs;
5060 if (current_function_calls_eh_return && style != 2)
5062 offset *= -UNITS_PER_WORD;
5064 /* If we're only restoring one register and sp is not valid, then
5065 use a move instruction to restore the register, since it's
5066 less work than reloading sp and popping the register.
5068 The default code results in a stack adjustment using an add/lea instruction,
5069 while this code results in a LEAVE instruction (or discrete equivalent),
5070 so it is profitable in some other cases as well, especially when there
5071 are no registers to restore.  We also use this code when TARGET_USE_LEAVE
5072 and there is exactly one register to pop.  This heuristic may need some
5073 tuning in the future. */
5074 if ((!sp_valid && frame.nregs <= 1)
5075 || (TARGET_EPILOGUE_USING_MOVE
5076 && cfun->machine->use_fast_prologue_epilogue
5077 && (frame.nregs > 1 || frame.to_allocate))
5078 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5079 || (frame_pointer_needed && TARGET_USE_LEAVE
5080 && cfun->machine->use_fast_prologue_epilogue
5081 && frame.nregs == 1)
5082 || current_function_calls_eh_return)
5084 /* Restore registers.  We can use ebp or esp to address the memory
5085 locations.  If both are available, default to ebp, since offsets
5086 are known to be small.  The only exception is esp pointing directly to the
5087 end of the block of saved registers, where we may simplify the addressing mode.  */
5090 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5091 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5092 frame.to_allocate, style == 2);
5094 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5095 offset, style == 2);
5097 /* eh_return epilogues need %ecx added to the stack pointer. */
5100 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5102 if (frame_pointer_needed)
5104 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5105 tmp = plus_constant (tmp, UNITS_PER_WORD);
5106 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5108 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5109 emit_move_insn (hard_frame_pointer_rtx, tmp);
5111 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5116 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5117 tmp = plus_constant (tmp, (frame.to_allocate
5118 + frame.nregs * UNITS_PER_WORD));
5119 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5122 else if (!frame_pointer_needed)
5123 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5124 GEN_INT (frame.to_allocate
5125 + frame.nregs * UNITS_PER_WORD),
5127 /* If not an i386, mov & pop is faster than "leave". */
5128 else if (TARGET_USE_LEAVE || optimize_size
5129 || !cfun->machine->use_fast_prologue_epilogue)
5130 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5133 pro_epilogue_adjust_stack (stack_pointer_rtx,
5134 hard_frame_pointer_rtx,
5137 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5139 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5144 /* The first step is to deallocate the stack frame so that we can
5145 pop the registers. */
5148 gcc_assert (frame_pointer_needed);
5149 pro_epilogue_adjust_stack (stack_pointer_rtx,
5150 hard_frame_pointer_rtx,
5151 GEN_INT (offset), style);
5153 else if (frame.to_allocate)
5154 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5155 GEN_INT (frame.to_allocate), style);
5157 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5158 if (ix86_save_reg (regno, false))
5161 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5163 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5165 if (frame_pointer_needed)
5167 /* Leave results in shorter dependency chains on CPUs that are
5168 able to grok it fast. */
5169 if (TARGET_USE_LEAVE)
5170 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5171 else if (TARGET_64BIT)
5172 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5174 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5178 if (cfun->machine->force_align_arg_pointer)
5180 emit_insn (gen_addsi3 (stack_pointer_rtx,
5181 cfun->machine->force_align_arg_pointer,
5185 /* Sibcall epilogues don't want a return instruction. */
5189 if (current_function_pops_args && current_function_args_size)
5191 rtx popc = GEN_INT (current_function_pops_args);
5193 /* i386 can only pop 64K bytes.  If asked to pop more, pop the
5194 return address, do an explicit add, and jump indirectly to the caller.  */
5197 if (current_function_pops_args >= 65536)
5199 rtx ecx = gen_rtx_REG (SImode, 2);
5201 /* There is no "pascal" calling convention in the 64-bit ABI. */
5202 gcc_assert (!TARGET_64BIT);
5204 emit_insn (gen_popsi1 (ecx));
5205 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5206 emit_jump_insn (gen_return_indirect_internal (ecx));
5209 emit_jump_insn (gen_return_pop_internal (popc));
5212 emit_jump_insn (gen_return_internal ());
5215 /* Reset from the function's potential modifications. */
5218 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5219 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5221 if (pic_offset_table_rtx)
5222 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5225 /* Extract the parts of an RTL expression that is a valid memory address
5226 for an instruction.  Return 0 if the structure of the address is
5227 grossly off.  Return -1 if the address contains ASHIFT, so it is not
5228 strictly valid, but is still used for computing the length of a lea instruction. */
5231 ix86_decompose_address (rtx addr, struct ix86_address *out)
5233 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5234 rtx base_reg, index_reg;
5235 HOST_WIDE_INT scale = 1;
5236 rtx scale_rtx = NULL_RTX;
5238 enum ix86_address_seg seg = SEG_DEFAULT;
5240 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
5242 else if (GET_CODE (addr) == PLUS)
5252 addends[n++] = XEXP (op, 1);
5255 while (GET_CODE (op) == PLUS);
5260 for (i = n; i >= 0; --i)
5263 switch (GET_CODE (op))
5268 index = XEXP (op, 0);
5269 scale_rtx = XEXP (op, 1);
5273 if (XINT (op, 1) == UNSPEC_TP
5274 && TARGET_TLS_DIRECT_SEG_REFS
5275 && seg == SEG_DEFAULT)
5276 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
5305 else if (GET_CODE (addr) == MULT)
5307 index = XEXP (addr, 0); /* index*scale */
5308 scale_rtx = XEXP (addr, 1);
5310 else if (GET_CODE (addr) == ASHIFT)
5314 /* We're called for lea too, which implements ashift on occasion. */
5315 index = XEXP (addr, 0);
5316 tmp = XEXP (addr, 1);
5317 if (GET_CODE (tmp) != CONST_INT)
5319 scale = INTVAL (tmp);
5320 if ((unsigned HOST_WIDE_INT) scale > 3)
5326 disp = addr; /* displacement */
5328 /* Extract the integral value of scale. */
5331 if (GET_CODE (scale_rtx) != CONST_INT)
5333 scale = INTVAL (scale_rtx);
5336 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
5337 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
5339 /* Allow arg pointer and stack pointer as index if there is no scaling. */
5340 if (base_reg && index_reg && scale == 1
5341 && (index_reg == arg_pointer_rtx
5342 || index_reg == frame_pointer_rtx
5343 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
5346 tmp = base, base = index, index = tmp;
5347 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
5350 /* Special case: %ebp cannot be encoded as a base without a displacement. */
5351 if ((base_reg == hard_frame_pointer_rtx
5352 || base_reg == frame_pointer_rtx
5353 || base_reg == arg_pointer_rtx) && !disp)
5356 /* Special case: on K6, [%esi] causes the instruction to be vector decoded.
5357 Avoid this by transforming to [%esi+0]. */
5358 if (ix86_tune == PROCESSOR_K6 && !optimize_size
5359 && base_reg && !index_reg && !disp
5361 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
5364 /* Special case: encode reg+reg instead of reg*2. */
5365 if (!base && index && scale && scale == 2)
5366 base = index, base_reg = index_reg, scale = 1;
5368 /* Special case: scaling cannot be encoded without base or displacement. */
5369 if (!base && !disp && index && scale != 1)
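/* Worked example: the address 12(%eax,%ebx,4), as produced for
   something like `a[i].f', arrives here as

       (plus (plus (mult (reg %ebx) (const_int 4))
                   (reg %eax))
             (const_int 12))

   and decomposes into base = %eax, index = %ebx, scale = 4,
   disp = (const_int 12), seg = SEG_DEFAULT (an illustration of the
   OUT fields, not code from this function).  */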
5381 /* Return the cost of the memory address X.
5382 For i386, it is better to use a complex address than let gcc copy
5383 the address into a reg and make a new pseudo.  But not if the address
5384 requires two regs - that would mean more pseudos with longer lifetimes.  */
5387 ix86_address_cost (rtx x)
5389 struct ix86_address parts;
5391 int ok = ix86_decompose_address (x, &parts);
5395 if (parts.base && GET_CODE (parts.base) == SUBREG)
5396 parts.base = SUBREG_REG (parts.base);
5397 if (parts.index && GET_CODE (parts.index) == SUBREG)
5398 parts.index = SUBREG_REG (parts.index);
5400 /* More complex memory references are better. */
5401 if (parts.disp && parts.disp != const0_rtx)
5403 if (parts.seg != SEG_DEFAULT)
5406 /* Attempt to minimize number of registers in the address. */
5408 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
5410 && (!REG_P (parts.index)
5411 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
5415 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
5417 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
5418 && parts.base != parts.index)
5421 /* The AMD K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
5422 since its predecode logic can't detect the length of instructions
5423 and they degenerate to vector decoding.  Increase the cost of such
5424 addresses here.  The penalty is minimally 2 cycles.  It may be worthwhile
5425 to split such addresses or even refuse them entirely.
5427 The following addressing modes are affected (matching the condition below):
     [base+scale*index], [scale*index+disp], [base+index]
5432 The first and last case may be avoidable by explicitly coding the zero in
5433 the memory address, but I don't have an AMD K6 machine handy to check this theory.  */
5437 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
5438 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
5439 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
5445 /* If X is a machine specific address (i.e. a symbol or label being
5446 referenced as a displacement from the GOT implemented using an
5447 UNSPEC), then return the base term. Otherwise return X. */
5450 ix86_find_base_term (rtx x)
5456 if (GET_CODE (x) != CONST)
5459 if (GET_CODE (term) == PLUS
5460 && (GET_CODE (XEXP (term, 1)) == CONST_INT
5461 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
5462 term = XEXP (term, 0);
5463 if (GET_CODE (term) != UNSPEC
5464 || XINT (term, 1) != UNSPEC_GOTPCREL)
5467 term = XVECEXP (term, 0, 0);
5469 if (GET_CODE (term) != SYMBOL_REF
5470 && GET_CODE (term) != LABEL_REF)
5476 term = ix86_delegitimize_address (x);
5478 if (GET_CODE (term) != SYMBOL_REF
5479 && GET_CODE (term) != LABEL_REF)
5485 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
5486 this is used to form addresses to local data when -fPIC is in effect.  */
5490 darwin_local_data_pic (rtx disp)
5492 if (GET_CODE (disp) == MINUS)
5494 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
5495 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
5496 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
5498 const char *sym_name = XSTR (XEXP (disp, 1), 0);
5499 if (! strcmp (sym_name, "<pic base>"))
5507 /* Determine if a given RTX is a valid constant. We already know this
5508 satisfies CONSTANT_P. */
5511 legitimate_constant_p (rtx x)
5513 switch (GET_CODE (x))
5518 if (GET_CODE (x) == PLUS)
5520 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5525 if (TARGET_MACHO && darwin_local_data_pic (x))
5528 /* Only some unspecs are valid as "constants". */
5529 if (GET_CODE (x) == UNSPEC)
5530 switch (XINT (x, 1))
5533 return TARGET_64BIT;
5536 x = XVECEXP (x, 0, 0);
5537 return (GET_CODE (x) == SYMBOL_REF
5538 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
5540 x = XVECEXP (x, 0, 0);
5541 return (GET_CODE (x) == SYMBOL_REF
5542 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
5547 /* We must have drilled down to a symbol. */
5548 if (GET_CODE (x) == LABEL_REF)
5550 if (GET_CODE (x) != SYMBOL_REF)
5555 /* TLS symbols are never valid. */
5556 if (SYMBOL_REF_TLS_MODEL (x))
5564 /* Otherwise we handle everything else in the move patterns. */
5568 /* Determine if it's legal to put X into the constant pool. This
5569 is not possible for the address of thread-local symbols, which
5570 is checked above. */
5573 ix86_cannot_force_const_mem (rtx x)
5575 return !legitimate_constant_p (x);
5578 /* Determine if a given RTX is a valid constant address. */
5581 constant_address_p (rtx x)
5583 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
5586 /* Nonzero if the constant value X is a legitimate general operand
5587 when generating PIC code. It is given that flag_pic is on and
5588 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
5591 legitimate_pic_operand_p (rtx x)
5595 switch (GET_CODE (x))
5598 inner = XEXP (x, 0);
5599 if (GET_CODE (inner) == PLUS
5600 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
5601 inner = XEXP (inner, 0);
5603 /* Only some unspecs are valid as "constants". */
5604 if (GET_CODE (inner) == UNSPEC)
5605 switch (XINT (inner, 1))
5608 return TARGET_64BIT;
5610 x = XVECEXP (inner, 0, 0);
5611 return (GET_CODE (x) == SYMBOL_REF
5612 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
5620 return legitimate_pic_address_disp_p (x);
5627 /* Determine if a given CONST RTX is a valid memory displacement in PIC mode.  */
5631 legitimate_pic_address_disp_p (rtx disp)
5635 /* In 64-bit mode we can allow direct addresses of symbols and labels
5636 when they are not dynamic symbols. */
5639 rtx op0 = disp, op1;
5641 switch (GET_CODE (disp))
5647 if (GET_CODE (XEXP (disp, 0)) != PLUS)
5649 op0 = XEXP (XEXP (disp, 0), 0);
5650 op1 = XEXP (XEXP (disp, 0), 1);
5651 if (GET_CODE (op1) != CONST_INT
5652 || INTVAL (op1) >= 16*1024*1024
5653 || INTVAL (op1) < -16*1024*1024)
5655 if (GET_CODE (op0) == LABEL_REF)
5657 if (GET_CODE (op0) != SYMBOL_REF)
5662 /* TLS references should always be enclosed in UNSPEC. */
5663 if (SYMBOL_REF_TLS_MODEL (op0))
5665 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
5673 if (GET_CODE (disp) != CONST)
5675 disp = XEXP (disp, 0);
5679 /* It is unsafe to allow PLUS expressions; this limits the allowed distance
5680 of GOT table references.  We should not need these anyway. */
5681 if (GET_CODE (disp) != UNSPEC
5682 || (XINT (disp, 1) != UNSPEC_GOTPCREL
5683 && XINT (disp, 1) != UNSPEC_GOTOFF))
5686 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5687 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5693 if (GET_CODE (disp) == PLUS)
5695 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5697 disp = XEXP (disp, 0);
5701 if (TARGET_MACHO && darwin_local_data_pic (disp))
5704 if (GET_CODE (disp) != UNSPEC)
5707 switch (XINT (disp, 1))
5712 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5714 /* Refuse GOTOFF in 64-bit mode since it is always 64 bits when used.
5715 The ABI also specifies a 32-bit relocation, but we don't produce it in
5716 the small PIC model at all. */
5717 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5718 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5720 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5722 case UNSPEC_GOTTPOFF:
5723 case UNSPEC_GOTNTPOFF:
5724 case UNSPEC_INDNTPOFF:
5727 disp = XVECEXP (disp, 0, 0);
5728 return (GET_CODE (disp) == SYMBOL_REF
5729 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
5731 disp = XVECEXP (disp, 0, 0);
5732 return (GET_CODE (disp) == SYMBOL_REF
5733 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
5735 disp = XVECEXP (disp, 0, 0);
5736 return (GET_CODE (disp) == SYMBOL_REF
5737 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
5743 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5744 memory address for an instruction.  The MODE argument is the machine mode
5745 for the MEM expression that wants to use this address.
5747 It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
5748 convert common non-canonical forms to canonical form so that they will be recognized.  */
5752 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5754 struct ix86_address parts;
5755 rtx base, index, disp;
5756 HOST_WIDE_INT scale;
5757 const char *reason = NULL;
5758 rtx reason_rtx = NULL_RTX;
5760 if (TARGET_DEBUG_ADDR)
5763 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5764 GET_MODE_NAME (mode), strict);
5768 if (ix86_decompose_address (addr, &parts) <= 0)
5770 reason = "decomposition failed";
5775 index = parts.index;
5777 scale = parts.scale;
5779 /* Validate base register.
5781 Don't allow SUBREGs that span more than a word here; they can lead to spill
5782 failures when the base is one word out of a two word structure, which is
5783 represented internally as a DImode int. */
5792 else if (GET_CODE (base) == SUBREG
5793 && REG_P (SUBREG_REG (base))
5794 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
5796 reg = SUBREG_REG (base);
5799 reason = "base is not a register";
5803 if (GET_MODE (base) != Pmode)
5805 reason = "base is not in Pmode";
5809 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
5810 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
5812 reason = "base is not valid";
5817 /* Validate index register.
5819 Don't allow SUBREGs that span more than a word here -- same as above. */
5828 else if (GET_CODE (index) == SUBREG
5829 && REG_P (SUBREG_REG (index))
5830 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
5832 reg = SUBREG_REG (index);
5835 reason = "index is not a register";
5839 if (GET_MODE (index) != Pmode)
5841 reason = "index is not in Pmode";
5845 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
5846 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
5848 reason = "index is not valid";
5853 /* Validate scale factor. */
5856 reason_rtx = GEN_INT (scale);
5859 reason = "scale without index";
5863 if (scale != 2 && scale != 4 && scale != 8)
5865 reason = "scale is not a valid multiplier";
5870 /* Validate displacement. */
5875 if (GET_CODE (disp) == CONST
5876 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5877 switch (XINT (XEXP (disp, 0), 1))
5879 /* Refuse GOTOFF and GOT in 64-bit mode since they are always 64 bits when
5880 used.  The ABI also specifies 32-bit relocations, but we don't produce
5881 them at all and use IP-relative addressing instead. */
5884 gcc_assert (flag_pic);
5886 goto is_legitimate_pic;
5887 reason = "64bit address unspec";
5890 case UNSPEC_GOTPCREL:
5891 gcc_assert (flag_pic);
5892 goto is_legitimate_pic;
5894 case UNSPEC_GOTTPOFF:
5895 case UNSPEC_GOTNTPOFF:
5896 case UNSPEC_INDNTPOFF:
5902 reason = "invalid address unspec";
5906 else if (flag_pic && (SYMBOLIC_CONST (disp)
5908 && !machopic_operand_p (disp)
5913 if (TARGET_64BIT && (index || base))
5915 /* foo@dtpoff(%rX) is ok. */
5916 if (GET_CODE (disp) != CONST
5917 || GET_CODE (XEXP (disp, 0)) != PLUS
5918 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5919 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5920 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5921 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5923 reason = "non-constant pic memory reference";
5927 else if (! legitimate_pic_address_disp_p (disp))
5929 reason = "displacement is an invalid pic construct";
5933 /* This code used to verify that a symbolic pic displacement
5934 includes the pic_offset_table_rtx register.
5936 While this is a good idea, unfortunately these constructs may
5937 be created by the "adds using lea" optimization for incorrect code like:
5946 This code is nonsensical, but results in addressing the
5947 GOT table with a pic_offset_table_rtx base.  We can't
5948 just refuse it easily, since it gets matched by the
5949 "addsi3" pattern, which later gets split to lea in the
5950 case the output register differs from the input.  While this
5951 could be handled by a separate addsi pattern for this case
5952 that never results in lea, disabling this test seems to be the
5953 easier and correct fix for the crash. */
5955 else if (GET_CODE (disp) != LABEL_REF
5956 && GET_CODE (disp) != CONST_INT
5957 && (GET_CODE (disp) != CONST
5958 || !legitimate_constant_p (disp))
5959 && (GET_CODE (disp) != SYMBOL_REF
5960 || !legitimate_constant_p (disp)))
5962 reason = "displacement is not constant";
5965 else if (TARGET_64BIT
5966 && !x86_64_immediate_operand (disp, VOIDmode))
5968 reason = "displacement is out of range";
5973 /* Everything looks valid. */
5974 if (TARGET_DEBUG_ADDR)
5975 fprintf (stderr, "Success.\n");
5979 if (TARGET_DEBUG_ADDR)
5981 fprintf (stderr, "Error: %s\n", reason);
5982 debug_rtx (reason_rtx);
5987 /* Return a unique alias set for the GOT. */
5989 static HOST_WIDE_INT
5990 ix86_GOT_alias_set (void)
5992 static HOST_WIDE_INT set = -1;
5994 set = new_alias_set ();
5998 /* Return a legitimate reference for ORIG (an address) using the
5999 register REG. If REG is 0, a new pseudo is generated.
6001 There are two types of references that must be handled:
6003 1. Global data references must load the address from the GOT, via
6004 the PIC reg. An insn is emitted to do this load, and the reg is
6007 2. Static data references, constant pool addresses, and code labels
6008 compute the address as an offset from the GOT, whose base is in
6009 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6010 differentiate them from global data objects. The returned
6011 address is the PIC reg + an unspec constant.
6013 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6014 reg also appears in the address. */
6017 legitimize_pic_address (rtx orig, rtx reg)
6025 reg = gen_reg_rtx (Pmode);
6026 /* Use the generic Mach-O PIC machinery. */
6027 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6030 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6032 else if (TARGET_64BIT
6033 && ix86_cmodel != CM_SMALL_PIC
6034 && local_symbolic_operand (addr, Pmode))
6037 /* This symbol may be referenced via a displacement from the PIC
6038 base address (@GOTOFF). */
6040 if (reload_in_progress)
6041 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6042 if (GET_CODE (addr) == CONST)
6043 addr = XEXP (addr, 0);
6044 if (GET_CODE (addr) == PLUS)
6046 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6047 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6050 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6051 new = gen_rtx_CONST (Pmode, new);
6053 tmpreg = gen_reg_rtx (Pmode);
6056 emit_move_insn (tmpreg, new);
6060 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
6061 tmpreg, 1, OPTAB_DIRECT);
6064 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
6066 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6068 /* This symbol may be referenced via a displacement from the PIC
6069 base address (@GOTOFF). */
6071 if (reload_in_progress)
6072 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6073 if (GET_CODE (addr) == CONST)
6074 addr = XEXP (addr, 0);
6075 if (GET_CODE (addr) == PLUS)
6077 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6078 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6081 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6082 new = gen_rtx_CONST (Pmode, new);
6083 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6087 emit_move_insn (reg, new);
6091 else if (GET_CODE (addr) == SYMBOL_REF)
6095 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6096 new = gen_rtx_CONST (Pmode, new);
6097 new = gen_const_mem (Pmode, new);
6098 set_mem_alias_set (new, ix86_GOT_alias_set ());
6101 reg = gen_reg_rtx (Pmode);
6102 /* Use gen_movsi directly; otherwise the address is loaded
6103 into a register for CSE.  We don't want to CSE this address;
6104 instead we CSE addresses from the GOT table, so skip this. */
6105 emit_insn (gen_movsi (reg, new));
6110 /* This symbol must be referenced via a load from the
6111 Global Offset Table (@GOT). */
6113 if (reload_in_progress)
6114 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6115 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6116 new = gen_rtx_CONST (Pmode, new);
6117 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6118 new = gen_const_mem (Pmode, new);
6119 set_mem_alias_set (new, ix86_GOT_alias_set ());
6122 reg = gen_reg_rtx (Pmode);
6123 emit_move_insn (reg, new);
6129 if (GET_CODE (addr) == CONST_INT
6130 && !x86_64_immediate_operand (addr, VOIDmode))
6134 emit_move_insn (reg, addr);
6138 new = force_reg (Pmode, addr);
6140 else if (GET_CODE (addr) == CONST)
6142 addr = XEXP (addr, 0);
6144 /* We must match stuff we generate before. Assume the only
6145 unspecs that can get here are ours. Not that we could do
6146 anything with them anyway.... */
6147 if (GET_CODE (addr) == UNSPEC
6148 || (GET_CODE (addr) == PLUS
6149 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
6151 gcc_assert (GET_CODE (addr) == PLUS);
6153 if (GET_CODE (addr) == PLUS)
6155 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6157 /* Check first to see if this is a constant offset from a @GOTOFF
6158 symbol reference. */
6159 if (local_symbolic_operand (op0, Pmode)
6160 && GET_CODE (op1) == CONST_INT)
6164 if (reload_in_progress)
6165 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6166 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6168 new = gen_rtx_PLUS (Pmode, new, op1);
6169 new = gen_rtx_CONST (Pmode, new);
6170 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6174 emit_move_insn (reg, new);
6180 if (INTVAL (op1) < -16*1024*1024
6181 || INTVAL (op1) >= 16*1024*1024)
6183 if (!x86_64_immediate_operand (op1, Pmode))
6184 op1 = force_reg (Pmode, op1);
6185 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
6191 base = legitimize_pic_address (XEXP (addr, 0), reg);
6192 new = legitimize_pic_address (XEXP (addr, 1),
6193 base == reg ? NULL_RTX : reg);
6195 if (GET_CODE (new) == CONST_INT)
6196 new = plus_constant (base, INTVAL (new));
6199 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6201 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6202 new = XEXP (new, 1);
6204 new = gen_rtx_PLUS (Pmode, base, new);
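/* The main cases above correspond to the following 32-bit code
   (illustrative AT&T syntax, PIC register in %ebx):

       local/static symbol, @GOTOFF:
           leal  foo@GOTOFF(%ebx), %eax     address = PIC base + offset

       global symbol, @GOT:
           movl  foo@GOT(%ebx), %eax        one load from the GOT slot

   so local data needs no memory access to form its address, while
   global data pays for a GOT load.  */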
6212 /* Load the thread pointer. If TO_REG is true, force it into a register. */
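/* On GNU/Linux the thread pointer lives at the %gs segment base in
   32-bit mode and at the %fs base in 64-bit mode; the UNSPEC_TP built
   here is ultimately rendered as something like "movl %gs:0, %reg"
   (a sketch; the exact templates are in i386.md).  */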
6215 get_thread_pointer (int to_reg)
6219 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6223 reg = gen_reg_rtx (Pmode);
6224 insn = gen_rtx_SET (VOIDmode, reg, tp);
6225 insn = emit_insn (insn);
6230 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6231 false if we expect this to be used for a memory address and true if
6232 we expect to load the address into a register. */
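/* A rough sketch of what the four models below expand to on a 32-bit
   GNU/Linux target (relocation spellings per the ELF TLS ABI; the
   64-bit forms differ in registers and details):

     global dynamic:  call ___tls_get_addr with an x@TLSGD argument
     local dynamic:   call ___tls_get_addr once for the module base,
                      then add per-symbol x@DTPOFF offsets
     initial exec:    load the offset from the GOT (x@GOTNTPOFF) and
                      add it to the thread pointer
     local exec:      add the link-time constant x@NTPOFF directly.  */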
6235 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6237 rtx dest, base, off, pic;
6242 case TLS_MODEL_GLOBAL_DYNAMIC:
6243 dest = gen_reg_rtx (Pmode);
6246 rtx rax = gen_rtx_REG (Pmode, 0), insns;
6249 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
6250 insns = get_insns ();
6253 emit_libcall_block (insns, dest, rax, x);
6256 emit_insn (gen_tls_global_dynamic_32 (dest, x));
6259 case TLS_MODEL_LOCAL_DYNAMIC:
6260 base = gen_reg_rtx (Pmode);
6263 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
6266 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
6267 insns = get_insns ();
6270 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
6271 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
6272 emit_libcall_block (insns, base, rax, note);
6275 emit_insn (gen_tls_local_dynamic_base_32 (base));
6277 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
6278 off = gen_rtx_CONST (Pmode, off);
6280 return gen_rtx_PLUS (Pmode, base, off);
6282 case TLS_MODEL_INITIAL_EXEC:
6286 type = UNSPEC_GOTNTPOFF;
6290 if (reload_in_progress)
6291 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6292 pic = pic_offset_table_rtx;
6293 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
6295 else if (!TARGET_GNU_TLS)
6297 pic = gen_reg_rtx (Pmode);
6298 emit_insn (gen_set_got (pic));
6299 type = UNSPEC_GOTTPOFF;
6304 type = UNSPEC_INDNTPOFF;
6307 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
6308 off = gen_rtx_CONST (Pmode, off);
6310 off = gen_rtx_PLUS (Pmode, pic, off);
6311 off = gen_const_mem (Pmode, off);
6312 set_mem_alias_set (off, ix86_GOT_alias_set ());
6314 if (TARGET_64BIT || TARGET_GNU_TLS)
6316 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6317 off = force_reg (Pmode, off);
6318 return gen_rtx_PLUS (Pmode, base, off);
6322 base = get_thread_pointer (true);
6323 dest = gen_reg_rtx (Pmode);
6324 emit_insn (gen_subsi3 (dest, base, off));
6328 case TLS_MODEL_LOCAL_EXEC:
6329 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
6330 (TARGET_64BIT || TARGET_GNU_TLS)
6331 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
6332 off = gen_rtx_CONST (Pmode, off);
6334 if (TARGET_64BIT || TARGET_GNU_TLS)
6336 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6337 return gen_rtx_PLUS (Pmode, base, off);
6341 base = get_thread_pointer (true);
6342 dest = gen_reg_rtx (Pmode);
6343 emit_insn (gen_subsi3 (dest, base, off));
6354 /* Try machine-dependent ways of modifying an illegitimate address
6355 to be legitimate. If we find one, return the new, valid address.
6356 This macro is used in only one place: `memory_address' in explow.c.
6358 OLDX is the address as it was before break_out_memory_refs was called.
6359 In some cases it is useful to look at this to decide what needs to be done.
6361 MODE and WIN are passed so that this macro can use
6362 GO_IF_LEGITIMATE_ADDRESS.
6364 It is always safe for this macro to do nothing. It exists to recognize
6365 opportunities to optimize the output.
6367 For the 80386, we handle X+REG by loading X into a register R and
6368 using R+REG. R will go in a general reg and indexing will be used.
6369 However, if REG is a broken-out memory address or multiplication,
6370 nothing needs to be done because REG can certainly go in a general reg.
6372 When -fpic is used, special handling is needed for symbolic references.
6373 See comments by legitimize_pic_address in i386.c for details. */
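/* For example, an incoming address of the form
     (plus (ashift (reg) (const_int 2)) (reg))
   is canonicalized below into
     (plus (mult (reg) (const_int 4)) (reg)),
   which matches the base + index*scale form of an x86 effective
   address.  */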
6376 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
6381 if (TARGET_DEBUG_ADDR)
6383 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
6384 GET_MODE_NAME (mode));
6388 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
6390 return legitimize_tls_address (x, log, false);
6391 if (GET_CODE (x) == CONST
6392 && GET_CODE (XEXP (x, 0)) == PLUS
6393 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6394 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
6396 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
6397 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
6400 if (flag_pic && SYMBOLIC_CONST (x))
6401 return legitimize_pic_address (x, 0);
6403 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
6404 if (GET_CODE (x) == ASHIFT
6405 && GET_CODE (XEXP (x, 1)) == CONST_INT
6406 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
6409 log = INTVAL (XEXP (x, 1));
6410 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
6411 GEN_INT (1 << log));
6414 if (GET_CODE (x) == PLUS)
6416 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
6418 if (GET_CODE (XEXP (x, 0)) == ASHIFT
6419 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6420 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
6423 log = INTVAL (XEXP (XEXP (x, 0), 1));
6424 XEXP (x, 0) = gen_rtx_MULT (Pmode,
6425 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
6426 GEN_INT (1 << log));
6429 if (GET_CODE (XEXP (x, 1)) == ASHIFT
6430 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
6431 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
6434 log = INTVAL (XEXP (XEXP (x, 1), 1));
6435 XEXP (x, 1) = gen_rtx_MULT (Pmode,
6436 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
6437 GEN_INT (1 << log));
6440 /* Put multiply first if it isn't already. */
6441 if (GET_CODE (XEXP (x, 1)) == MULT)
6443 rtx tmp = XEXP (x, 0);
6444 XEXP (x, 0) = XEXP (x, 1);
6449 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
6450 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
6451 created by virtual register instantiation, register elimination, and
6452 similar optimizations. */
6453 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
6456 x = gen_rtx_PLUS (Pmode,
6457 gen_rtx_PLUS (Pmode, XEXP (x, 0),
6458 XEXP (XEXP (x, 1), 0)),
6459 XEXP (XEXP (x, 1), 1));
6463 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
6464 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
6465 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
6466 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
6467 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
6468 && CONSTANT_P (XEXP (x, 1)))
6471 rtx other = NULL_RTX;
6473 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6475 constant = XEXP (x, 1);
6476 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
6478 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
6480 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
6481 other = XEXP (x, 1);
6489 x = gen_rtx_PLUS (Pmode,
6490 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
6491 XEXP (XEXP (XEXP (x, 0), 1), 0)),
6492 plus_constant (other, INTVAL (constant)));
6496 if (changed && legitimate_address_p (mode, x, FALSE))
6499 if (GET_CODE (XEXP (x, 0)) == MULT)
6502 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
6505 if (GET_CODE (XEXP (x, 1)) == MULT)
6508 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
6512 && GET_CODE (XEXP (x, 1)) == REG
6513 && GET_CODE (XEXP (x, 0)) == REG)
6516 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
6519 x = legitimize_pic_address (x, 0);
6522 if (changed && legitimate_address_p (mode, x, FALSE))
6525 if (GET_CODE (XEXP (x, 0)) == REG)
6527 rtx temp = gen_reg_rtx (Pmode);
6528 rtx val = force_operand (XEXP (x, 1), temp);
6530 emit_move_insn (temp, val);
6536 else if (GET_CODE (XEXP (x, 1)) == REG)
6538 rtx temp = gen_reg_rtx (Pmode);
6539 rtx val = force_operand (XEXP (x, 0), temp);
6541 emit_move_insn (temp, val);
6551 /* Print an integer constant expression in assembler syntax. Addition
6552 and subtraction are the only arithmetic that may appear in these
6553 expressions. FILE is the stdio stream to write to, X is the rtx, and
6554 CODE is the operand print code from the output string. */
6557 output_pic_addr_const (FILE *file, rtx x, int code)
6561 switch (GET_CODE (x))
6564 gcc_assert (flag_pic);
6569 assemble_name (file, XSTR (x, 0));
6570 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
6571 fputs ("@PLT", file);
6578 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
6579 assemble_name (asm_out_file, buf);
6583 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6587 /* This used to output parentheses around the expression,
6588 but that does not work on the 386 (either ATT or BSD assembler). */
6589 output_pic_addr_const (file, XEXP (x, 0), code);
6593 if (GET_MODE (x) == VOIDmode)
6595 /* We can use %d if the number is <32 bits and positive. */
6596 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
6597 fprintf (file, "0x%lx%08lx",
6598 (unsigned long) CONST_DOUBLE_HIGH (x),
6599 (unsigned long) CONST_DOUBLE_LOW (x));
6601 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
6604 /* We can't handle floating point constants;
6605 PRINT_OPERAND must handle them. */
6606 output_operand_lossage ("floating constant misused");
6610 /* Some assemblers need integer constants to appear first. */
6611 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
6613 output_pic_addr_const (file, XEXP (x, 0), code);
6615 output_pic_addr_const (file, XEXP (x, 1), code);
6619 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
6620 output_pic_addr_const (file, XEXP (x, 1), code);
6622 output_pic_addr_const (file, XEXP (x, 0), code);
6628 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
6629 output_pic_addr_const (file, XEXP (x, 0), code);
6631 output_pic_addr_const (file, XEXP (x, 1), code);
6633 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
6637 gcc_assert (XVECLEN (x, 0) == 1);
6638 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
6639 switch (XINT (x, 1))
6642 fputs ("@GOT", file);
6645 fputs ("@GOTOFF", file);
6647 case UNSPEC_GOTPCREL:
6648 fputs ("@GOTPCREL(%rip)", file);
6650 case UNSPEC_GOTTPOFF:
6651 /* FIXME: This might be @TPOFF in Sun ld too. */
6652 fputs ("@GOTTPOFF", file);
6655 fputs ("@TPOFF", file);
6659 fputs ("@TPOFF", file);
6661 fputs ("@NTPOFF", file);
6664 fputs ("@DTPOFF", file);
6666 case UNSPEC_GOTNTPOFF:
6668 fputs ("@GOTTPOFF(%rip)", file);
6670 fputs ("@GOTNTPOFF", file);
6672 case UNSPEC_INDNTPOFF:
6673 fputs ("@INDNTPOFF", file);
6676 output_operand_lossage ("invalid UNSPEC as operand");
6682 output_operand_lossage ("invalid expression as operand");
6686 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6687 We need to emit DTP-relative relocations. */
6690 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6692 fputs (ASM_LONG, file);
6693 output_addr_const (file, x);
6694 fputs ("@DTPOFF", file);
6700 fputs (", 0", file);
6707 /* In the name of slightly smaller debug output, and to cater to
6708 general assembler lossage, recognize PIC+GOTOFF and turn it back
6709 into a direct symbol reference. */
6712 ix86_delegitimize_address (rtx orig_x)
6716 if (GET_CODE (x) == MEM)
6721 if (GET_CODE (x) != CONST
6722 || GET_CODE (XEXP (x, 0)) != UNSPEC
6723 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6724 || GET_CODE (orig_x) != MEM)
6726 return XVECEXP (XEXP (x, 0), 0, 0);
6729 if (GET_CODE (x) != PLUS
6730 || GET_CODE (XEXP (x, 1)) != CONST)
6733 if (GET_CODE (XEXP (x, 0)) == REG
6734 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6735 /* %ebx + GOT/GOTOFF */
6737 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6739 /* %ebx + %reg * scale + GOT/GOTOFF */
6741 if (GET_CODE (XEXP (y, 0)) == REG
6742 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6744 else if (GET_CODE (XEXP (y, 1)) == REG
6745 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6749 if (GET_CODE (y) != REG
6750 && GET_CODE (y) != MULT
6751 && GET_CODE (y) != ASHIFT)
6757 x = XEXP (XEXP (x, 1), 0);
6758 if (GET_CODE (x) == UNSPEC
6759 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6760 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6763 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6764 return XVECEXP (x, 0, 0);
6767 if (GET_CODE (x) == PLUS
6768 && GET_CODE (XEXP (x, 0)) == UNSPEC
6769 && GET_CODE (XEXP (x, 1)) == CONST_INT
6770 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6771 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6772 && GET_CODE (orig_x) != MEM)))
6774 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6776 return gen_rtx_PLUS (Pmode, y, x);
6784 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6789 if (mode == CCFPmode || mode == CCFPUmode)
6791 enum rtx_code second_code, bypass_code;
6792 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6793 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
6794 code = ix86_fp_compare_code_to_integer (code);
6798 code = reverse_condition (code);
6809 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
6813 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6814 Those same assemblers have the same but opposite lossage on cmov. */
6815 gcc_assert (mode == CCmode);
6816 suffix = fp ? "nbe" : "a";
6836 gcc_assert (mode == CCmode);
6858 gcc_assert (mode == CCmode);
6859 suffix = fp ? "nb" : "ae";
6862 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
6866 gcc_assert (mode == CCmode);
6870 suffix = fp ? "u" : "p";
6873 suffix = fp ? "nu" : "np";
6878 fputs (suffix, file);
6881 /* Print the name of register X to FILE based on its machine mode and number.
6882 If CODE is 'w', pretend the mode is HImode.
6883 If CODE is 'b', pretend the mode is QImode.
6884 If CODE is 'k', pretend the mode is SImode.
6885 If CODE is 'q', pretend the mode is DImode.
6886 If CODE is 'h', pretend the reg is the 'high' byte register.
6887 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
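/* For example, for hard register 0, code 'k' prints the SImode name
   ("eax") and code 'b' the QImode name ("al"); with no code, the name
   is chosen from the operand's own mode.  */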
6890 print_reg (rtx x, int code, FILE *file)
6892 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
6893 && REGNO (x) != FRAME_POINTER_REGNUM
6894 && REGNO (x) != FLAGS_REG
6895 && REGNO (x) != FPSR_REG);
6897 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6900 if (code == 'w' || MMX_REG_P (x))
6902 else if (code == 'b')
6904 else if (code == 'k')
6906 else if (code == 'q')
6908 else if (code == 'y')
6910 else if (code == 'h')
6913 code = GET_MODE_SIZE (GET_MODE (x));
6915 /* Irritatingly, AMD extended registers use a different naming convention
6916 than the normal registers do. */
6917 if (REX_INT_REG_P (x))
6919 gcc_assert (TARGET_64BIT);
6923 error ("extended registers have no high halves");
6926 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6929 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6932 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6935 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6938 error ("unsupported operand size for extended register");
6946 if (STACK_TOP_P (x))
6948 fputs ("st(0)", file);
6955 if (! ANY_FP_REG_P (x))
6956 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6961 fputs (hi_reg_name[REGNO (x)], file);
6964 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6966 fputs (qi_reg_name[REGNO (x)], file);
6969 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6971 fputs (qi_high_reg_name[REGNO (x)], file);
6978 /* Locate some local-dynamic symbol still in use by this function
6979 so that we can print its name in some tls_local_dynamic_base pattern. */
6983 get_some_local_dynamic_name (void)
6987 if (cfun->machine->some_ld_name)
6988 return cfun->machine->some_ld_name;
6990 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
6992 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
6993 return cfun->machine->some_ld_name;
6999 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
7003 if (GET_CODE (x) == SYMBOL_REF
7004 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
7006 cfun->machine->some_ld_name = XSTR (x, 0);
7014 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7015 C -- print opcode suffix for set/cmov insn.
7016 c -- like C, but print reversed condition
7017 F,f -- likewise, but for floating-point.
7018 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7020 R -- print the prefix for register names.
7021 z -- print the opcode suffix for the size of the current operand.
7022 * -- print a star (in certain assembler syntax)
7023 A -- print an absolute memory reference.
7024 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7025 s -- print a shift double count, followed by the assembler's argument
7027 b -- print the QImode name of the register for the indicated operand.
7028 %b0 would print %al if operands[0] is reg 0.
7029 w -- likewise, print the HImode name of the register.
7030 k -- likewise, print the SImode name of the register.
7031 q -- likewise, print the DImode name of the register.
7032 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7033 y -- print "st(0)" instead of "st" as a register.
7034 D -- print condition for SSE cmp instruction.
7035 P -- if PIC, print an @PLT suffix.
7036 X -- don't print any sort of PIC '@' suffix for a symbol.
7037 & -- print some in-use local-dynamic symbol name.
7038 H -- print a memory address offset by 8; used for sse high-parts
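/* So, for instance, if operands[1] is hard register 0, "%b1" in an
   output template prints "al", "%w1" prints "ax", and "%k1" prints
   "eax" (modulo the assembler dialect's register prefix).  */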
7042 print_operand (FILE *file, rtx x, int code)
7049 if (ASSEMBLER_DIALECT == ASM_ATT)
7054 assemble_name (file, get_some_local_dynamic_name ());
7058 switch (ASSEMBLER_DIALECT)
7065 /* Intel syntax. For absolute addresses, registers should not
7066 be surrounded by braces. */
7067 if (GET_CODE (x) != REG)
7070 PRINT_OPERAND (file, x, 0);
7080 PRINT_OPERAND (file, x, 0);
7085 if (ASSEMBLER_DIALECT == ASM_ATT)
7090 if (ASSEMBLER_DIALECT == ASM_ATT)
7095 if (ASSEMBLER_DIALECT == ASM_ATT)
7100 if (ASSEMBLER_DIALECT == ASM_ATT)
7105 if (ASSEMBLER_DIALECT == ASM_ATT)
7110 if (ASSEMBLER_DIALECT == ASM_ATT)
7115 /* 387 opcodes don't get size suffixes if the operands are
7117 if (STACK_REG_P (x))
7120 /* Likewise if using Intel opcodes. */
7121 if (ASSEMBLER_DIALECT == ASM_INTEL)
7124 /* This is the size of op from size of operand. */
7125 switch (GET_MODE_SIZE (GET_MODE (x)))
7128 #ifdef HAVE_GAS_FILDS_FISTS
7134 if (GET_MODE (x) == SFmode)
7149 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
7151 #ifdef GAS_MNEMONICS
7177 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
7179 PRINT_OPERAND (file, x, 0);
7185 /* A little bit of brain damage here. The SSE compare instructions
7186 use completely different names for the comparisons than the
7187 fp conditional moves do. */
7188 switch (GET_CODE (x))
7203 fputs ("unord", file);
7207 fputs ("neq", file);
7211 fputs ("nlt", file);
7215 fputs ("nle", file);
7218 fputs ("ord", file);
7225 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7226 if (ASSEMBLER_DIALECT == ASM_ATT)
7228 switch (GET_MODE (x))
7230 case HImode: putc ('w', file); break;
7232 case SFmode: putc ('l', file); break;
7234 case DFmode: putc ('q', file); break;
7235 default: gcc_unreachable ();
7242 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
7245 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7246 if (ASSEMBLER_DIALECT == ASM_ATT)
7249 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
7252 /* Like above, but reverse condition */
7254 /* Check to see if argument to %c is really a constant
7255 and not a condition code which needs to be reversed. */
7256 if (!COMPARISON_P (x))
7258 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
7261 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
7264 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7265 if (ASSEMBLER_DIALECT == ASM_ATT)
7268 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
7272 /* It doesn't actually matter what mode we use here, as we're
7273 only going to use this for printing. */
7274 x = adjust_address_nv (x, DImode, 8);
7281 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
7284 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
7287 int pred_val = INTVAL (XEXP (x, 0));
7289 if (pred_val < REG_BR_PROB_BASE * 45 / 100
7290 || pred_val > REG_BR_PROB_BASE * 55 / 100)
7292 int taken = pred_val > REG_BR_PROB_BASE / 2;
7293 int cputaken = final_forward_branch_p (current_output_insn) == 0;
7295 /* Emit hints only where the default branch prediction
7296 heuristics would fail. */
7297 if (taken != cputaken)
7299 /* We use the 3e (DS) prefix for taken branches and
7300 the 2e (CS) prefix for not-taken branches. */
7302 fputs ("ds ; ", file);
7304 fputs ("cs ; ", file);
7311 output_operand_lossage ("invalid operand code '%c'", code);
7315 if (GET_CODE (x) == REG)
7316 print_reg (x, code, file);
7318 else if (GET_CODE (x) == MEM)
7320 /* No `byte ptr' prefix for call instructions. */
7321 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
7324 switch (GET_MODE_SIZE (GET_MODE (x)))
7326 case 1: size = "BYTE"; break;
7327 case 2: size = "WORD"; break;
7328 case 4: size = "DWORD"; break;
7329 case 8: size = "QWORD"; break;
7330 case 12: size = "XWORD"; break;
7331 case 16: size = "XMMWORD"; break;
7336 /* Check for explicit size override (codes 'b', 'w' and 'k') */
7339 else if (code == 'w')
7341 else if (code == 'k')
7345 fputs (" PTR ", file);
7349 /* Avoid (%rip) for call operands. */
7350 if (CONSTANT_ADDRESS_P (x) && code == 'P'
7351 && GET_CODE (x) != CONST_INT)
7352 output_addr_const (file, x);
7353 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
7354 output_operand_lossage ("invalid constraints for operand");
7359 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
7364 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7365 REAL_VALUE_TO_TARGET_SINGLE (r, l);
7367 if (ASSEMBLER_DIALECT == ASM_ATT)
7369 fprintf (file, "0x%08lx", l);
7372 /* These float cases don't actually occur as immediate operands. */
7373 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
7377 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7378 fprintf (file, "%s", dstr);
7381 else if (GET_CODE (x) == CONST_DOUBLE
7382 && GET_MODE (x) == XFmode)
7386 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7387 fprintf (file, "%s", dstr);
7392 /* We have patterns that allow zero sets of memory, for instance.
7393 In 64-bit mode, we should probably support all 8-byte vectors,
7394 since we can in fact encode that into an immediate. */
7395 if (GET_CODE (x) == CONST_VECTOR)
7397 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
7403 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
7405 if (ASSEMBLER_DIALECT == ASM_ATT)
7408 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
7409 || GET_CODE (x) == LABEL_REF)
7411 if (ASSEMBLER_DIALECT == ASM_ATT)
7414 fputs ("OFFSET FLAT:", file);
7417 if (GET_CODE (x) == CONST_INT)
7418 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7420 output_pic_addr_const (file, x, code);
7422 output_addr_const (file, x);
7426 /* Print a memory operand whose address is ADDR. */
7429 print_operand_address (FILE *file, rtx addr)
7431 struct ix86_address parts;
7432 rtx base, index, disp;
7434 int ok = ix86_decompose_address (addr, &parts);
7439 index = parts.index;
7441 scale = parts.scale;
7449 if (USER_LABEL_PREFIX[0] == 0)
7451 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
7457 if (!base && !index)
7459 /* A displacement-only address requires special attention. */
7461 if (GET_CODE (disp) == CONST_INT)
7463 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
7465 if (USER_LABEL_PREFIX[0] == 0)
7467 fputs ("ds:", file);
7469 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
7472 output_pic_addr_const (file, disp, 0);
7474 output_addr_const (file, disp);
7476 /* Use the one-byte-shorter RIP-relative addressing in 64-bit mode. */
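/* (In 64-bit mode an absolute disp32 address also needs a SIB byte,
   which is what makes the "foo(%rip)" form one byte shorter.)  */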
7479 if (GET_CODE (disp) == CONST
7480 && GET_CODE (XEXP (disp, 0)) == PLUS
7481 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7482 disp = XEXP (XEXP (disp, 0), 0);
7483 if (GET_CODE (disp) == LABEL_REF
7484 || (GET_CODE (disp) == SYMBOL_REF
7485 && SYMBOL_REF_TLS_MODEL (disp) == 0))
7486 fputs ("(%rip)", file);
7491 if (ASSEMBLER_DIALECT == ASM_ATT)
7496 output_pic_addr_const (file, disp, 0);
7497 else if (GET_CODE (disp) == LABEL_REF)
7498 output_asm_label (disp);
7500 output_addr_const (file, disp);
7505 print_reg (base, 0, file);
7509 print_reg (index, 0, file);
7511 fprintf (file, ",%d", scale);
7517 rtx offset = NULL_RTX;
7521 /* Pull out the offset of a symbol; print any symbol itself. */
7522 if (GET_CODE (disp) == CONST
7523 && GET_CODE (XEXP (disp, 0)) == PLUS
7524 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7526 offset = XEXP (XEXP (disp, 0), 1);
7527 disp = gen_rtx_CONST (VOIDmode,
7528 XEXP (XEXP (disp, 0), 0));
7532 output_pic_addr_const (file, disp, 0);
7533 else if (GET_CODE (disp) == LABEL_REF)
7534 output_asm_label (disp);
7535 else if (GET_CODE (disp) == CONST_INT)
7538 output_addr_const (file, disp);
7544 print_reg (base, 0, file);
7547 if (INTVAL (offset) >= 0)
7549 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7553 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7560 print_reg (index, 0, file);
7562 fprintf (file, "*%d", scale);
7570 output_addr_const_extra (FILE *file, rtx x)
7574 if (GET_CODE (x) != UNSPEC)
7577 op = XVECEXP (x, 0, 0);
7578 switch (XINT (x, 1))
7580 case UNSPEC_GOTTPOFF:
7581 output_addr_const (file, op);
7582 /* FIXME: This might be @TPOFF in Sun ld. */
7583 fputs ("@GOTTPOFF", file);
7586 output_addr_const (file, op);
7587 fputs ("@TPOFF", file);
7590 output_addr_const (file, op);
7592 fputs ("@TPOFF", file);
7594 fputs ("@NTPOFF", file);
7597 output_addr_const (file, op);
7598 fputs ("@DTPOFF", file);
7600 case UNSPEC_GOTNTPOFF:
7601 output_addr_const (file, op);
7603 fputs ("@GOTTPOFF(%rip)", file);
7605 fputs ("@GOTNTPOFF", file);
7607 case UNSPEC_INDNTPOFF:
7608 output_addr_const (file, op);
7609 fputs ("@INDNTPOFF", file);
7619 /* Split one or more DImode RTL references into pairs of SImode
7620 references. The RTL can be REG, offsettable MEM, integer constant, or
7621 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
7622 split and "num" is its length. lo_half and hi_half are output arrays
7623 that parallel "operands". */
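/* E.g. a DImode MEM at address A splits into an SImode MEM at A (low
   half) and an SImode MEM at A+4 (high half); REGs and constants go
   through simplify_gen_subreg instead.  */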
7626 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7630 rtx op = operands[num];
7632 /* simplify_subreg refuses to split volatile memory addresses,
7633 but we still have to handle them. */
7634 if (GET_CODE (op) == MEM)
7636 lo_half[num] = adjust_address (op, SImode, 0);
7637 hi_half[num] = adjust_address (op, SImode, 4);
7641 lo_half[num] = simplify_gen_subreg (SImode, op,
7642 GET_MODE (op) == VOIDmode
7643 ? DImode : GET_MODE (op), 0);
7644 hi_half[num] = simplify_gen_subreg (SImode, op,
7645 GET_MODE (op) == VOIDmode
7646 ? DImode : GET_MODE (op), 4);
7650 /* Split one or more TImode RTL references into pairs of DImode
7651 references. The RTL can be REG, offsettable MEM, integer constant, or
7652 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
7653 split and "num" is its length. lo_half and hi_half are output arrays
7654 that parallel "operands". */
7657 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7661 rtx op = operands[num];
7663 /* simplify_subreg refuses to split volatile memory addresses, but we
7664 still have to handle them. */
7665 if (GET_CODE (op) == MEM)
7667 lo_half[num] = adjust_address (op, DImode, 0);
7668 hi_half[num] = adjust_address (op, DImode, 8);
7672 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
7673 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
7678 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
7679 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
7680 is the expression of the binary operation. The output may either be
7681 emitted here, or returned to the caller, like all output_* functions.
7683 There is no guarantee that the operands are the same mode, as they
7684 might be within FLOAT or FLOAT_EXTEND expressions. */
7686 #ifndef SYSV386_COMPAT
7687 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
7688 wants to fix the assemblers because that causes incompatibility
7689 with gcc. No-one wants to fix gcc because that causes
7690 incompatibility with assemblers... You can use the option of
7691 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
7692 #define SYSV386_COMPAT 1
7696 output_387_binary_op (rtx insn, rtx *operands)
7698 static char buf[30];
7701 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
7703 #ifdef ENABLE_CHECKING
7704 /* Even if we do not want to check the inputs, this documents the input
7705 constraints, which helps in understanding the following code. */
7706 if (STACK_REG_P (operands[0])
7707 && ((REG_P (operands[1])
7708 && REGNO (operands[0]) == REGNO (operands[1])
7709 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
7710 || (REG_P (operands[2])
7711 && REGNO (operands[0]) == REGNO (operands[2])
7712 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
7713 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
7716 gcc_assert (is_sse);
7719 switch (GET_CODE (operands[3]))
7722 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7723 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7731 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7732 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7740 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7741 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7749 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7750 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7764 if (GET_MODE (operands[0]) == SFmode)
7765 strcat (buf, "ss\t{%2, %0|%0, %2}");
7767 strcat (buf, "sd\t{%2, %0|%0, %2}");
7772 switch (GET_CODE (operands[3]))
7776 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7778 rtx temp = operands[2];
7779 operands[2] = operands[1];
7783 /* Now we know operands[0] == operands[1]. */
7785 if (GET_CODE (operands[2]) == MEM)
7791 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7793 if (STACK_TOP_P (operands[0]))
7794 /* How is it that we are storing to a dead operand[2]?
7795 Well, presumably operands[1] is dead too. We can't
7796 store the result to st(0) as st(0) gets popped on this
7797 instruction. Instead store to operands[2] (which I
7798 think has to be st(1)). st(1) will be popped later.
7799 gcc <= 2.8.1 didn't have this check and generated
7800 assembly code that the Unixware assembler rejected. */
7801 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7803 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7807 if (STACK_TOP_P (operands[0]))
7808 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7810 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7815 if (GET_CODE (operands[1]) == MEM)
7821 if (GET_CODE (operands[2]) == MEM)
7827 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7830 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7831 derived assemblers, confusingly reverse the direction of
7832 the operation for fsub{r} and fdiv{r} when the
7833 destination register is not st(0). The Intel assembler
7834 doesn't have this brain damage. Read !SYSV386_COMPAT to
7835 figure out what the hardware really does. */
7836 if (STACK_TOP_P (operands[0]))
7837 p = "{p\t%0, %2|rp\t%2, %0}";
7839 p = "{rp\t%2, %0|p\t%0, %2}";
7841 if (STACK_TOP_P (operands[0]))
7842 /* As above for fmul/fadd, we can't store to st(0). */
7843 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7845 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7850 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7853 if (STACK_TOP_P (operands[0]))
7854 p = "{rp\t%0, %1|p\t%1, %0}";
7856 p = "{p\t%1, %0|rp\t%0, %1}";
7858 if (STACK_TOP_P (operands[0]))
7859 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7861 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7866 if (STACK_TOP_P (operands[0]))
7868 if (STACK_TOP_P (operands[1]))
7869 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7871 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7874 else if (STACK_TOP_P (operands[1]))
7877 p = "{\t%1, %0|r\t%0, %1}";
7879 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7885 p = "{r\t%2, %0|\t%0, %2}";
7887 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7900 /* Return the mode needed for ENTITY in the optimize_mode_switching pass. */
7903 ix86_mode_needed (int entity, rtx insn)
7905 enum attr_i387_cw mode;
7907 /* The mode UNINITIALIZED is used to store the control word after a
7908 function call or ASM pattern. The mode ANY specifies that the function
7909 has no requirements on the control word and makes no changes to the
7910 bits we are interested in. */
7913 || (NONJUMP_INSN_P (insn)
7914 && (asm_noperands (PATTERN (insn)) >= 0
7915 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
7916 return I387_CW_UNINITIALIZED;
7918 if (recog_memoized (insn) < 0)
7921 mode = get_attr_i387_cw (insn);
7926 if (mode == I387_CW_TRUNC)
7931 if (mode == I387_CW_FLOOR)
7936 if (mode == I387_CW_CEIL)
7941 if (mode == I387_CW_MASK_PM)
7952 /* Output code to initialize the control word copies used by the trunc?f?i
7953 and rounding patterns. CURRENT_MODE is set to the current control word,
7954 while NEW_MODE is set to the new control word. */
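/* Bits 10-11 of the 387 control word select the rounding mode
   (00 = to nearest, 01 = down, 10 = up, 11 = toward zero), and bit 5
   (0x0020) masks the precision exception; the constants 0x0400,
   0x0800, 0x0c00 and 0x0020 used below follow from that layout.  */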
7957 emit_i387_cw_initialization (int mode)
7959 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
7964 rtx reg = gen_reg_rtx (HImode);
7966 emit_insn (gen_x86_fnstcw_1 (stored_mode));
7967 emit_move_insn (reg, stored_mode);
7969 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
7974 /* round toward zero (truncate) */
7975 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7976 slot = SLOT_CW_TRUNC;
7980 /* round down toward -oo */
7981 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7982 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7983 slot = SLOT_CW_FLOOR;
7987 /* round up toward +oo */
7988 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7989 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
7990 slot = SLOT_CW_CEIL;
7993 case I387_CW_MASK_PM:
7994 /* mask precision exception for nearbyint() */
7995 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7996 slot = SLOT_CW_MASK_PM;
8008 /* round toward zero (truncate) */
8009 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8010 slot = SLOT_CW_TRUNC;
8014 /* round down toward -oo */
8015 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
8016 slot = SLOT_CW_FLOOR;
8020 /* round up toward +oo */
8021 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
8022 slot = SLOT_CW_CEIL;
8025 case I387_CW_MASK_PM:
8026 /* mask precision exception for nearbyint() */
8027 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8028 slot = SLOT_CW_MASK_PM;
8036 gcc_assert (slot < MAX_386_STACK_LOCALS);
8038 new_mode = assign_386_stack_local (HImode, slot);
8039 emit_move_insn (new_mode, reg);
8042 /* Output code for INSN to convert a float to a signed int. OPERANDS
8043 are the insn operands. The output may be [HSD]Imode and the input
8044 operand may be [SDX]Fmode. */
8047 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
8049 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8050 int dimode_p = GET_MODE (operands[0]) == DImode;
8051 int round_mode = get_attr_i387_cw (insn);
8053 /* Jump through a hoop or two for DImode, since the hardware has no
8054 non-popping instruction. We used to do this a different way, but
8055 that was somewhat fragile and broke with post-reload splitters. */
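/* The hoop: when the value is still live, the "fld %y1" below
   duplicates the stack top, so the popping fistp consumes the copy
   and leaves the original where it was.  */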
8056 if ((dimode_p || fisttp) && !stack_top_dies)
8057 output_asm_insn ("fld\t%y1", operands);
8059 gcc_assert (STACK_TOP_P (operands[1]));
8060 gcc_assert (GET_CODE (operands[0]) == MEM);
8063 output_asm_insn ("fisttp%z0\t%0", operands);
8066 if (round_mode != I387_CW_ANY)
8067 output_asm_insn ("fldcw\t%3", operands);
8068 if (stack_top_dies || dimode_p)
8069 output_asm_insn ("fistp%z0\t%0", operands);
8071 output_asm_insn ("fist%z0\t%0", operands);
8072 if (round_mode != I387_CW_ANY)
8073 output_asm_insn ("fldcw\t%2", operands);
8079 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
8080 should be used. UNORDERED_P is true when fucom should be used. */
8083 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
8086 rtx cmp_op0, cmp_op1;
8087 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
8091 cmp_op0 = operands[0];
8092 cmp_op1 = operands[1];
8096 cmp_op0 = operands[1];
8097 cmp_op1 = operands[2];
8102 if (GET_MODE (operands[0]) == SFmode)
8104 return "ucomiss\t{%1, %0|%0, %1}";
8106 return "comiss\t{%1, %0|%0, %1}";
8109 return "ucomisd\t{%1, %0|%0, %1}";
8111 return "comisd\t{%1, %0|%0, %1}";
8114 gcc_assert (STACK_TOP_P (cmp_op0));
8116 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8118 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
8122 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
8123 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
8126 return "ftst\n\tfnstsw\t%0";
8129 if (STACK_REG_P (cmp_op1)
8131 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
8132 && REGNO (cmp_op1) != FIRST_STACK_REG)
8134 /* If both the top of the 387 stack and the other operand (also
8135 a stack register) die, then this must be a
8136 `fcompp' float compare. */
8140 /* There is no double-popping fcomi variant. Fortunately,
8141 eflags is immune to the fstp's cc clobbering. */
8143 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
8145 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
8146 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
8151 return "fucompp\n\tfnstsw\t%0";
8153 return "fcompp\n\tfnstsw\t%0";
8158 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
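/* For example, mask 11 (eflags_p set, fp operand, unordered_p set,
   stack_top_dies set) selects "fucomip", the popping unordered
   compare-into-flags form.  */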
8160 static const char * const alt[16] =
8162 "fcom%z2\t%y2\n\tfnstsw\t%0",
8163 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8164 "fucom%z2\t%y2\n\tfnstsw\t%0",
8165 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8167 "ficom%z2\t%y2\n\tfnstsw\t%0",
8168 "ficomp%z2\t%y2\n\tfnstsw\t%0",
8172 "fcomi\t{%y1, %0|%0, %y1}",
8173 "fcomip\t{%y1, %0|%0, %y1}",
8174 "fucomi\t{%y1, %0|%0, %y1}",
8175 "fucomip\t{%y1, %0|%0, %y1}",
8186 mask = eflags_p << 3;
8187 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
8188 mask |= unordered_p << 1;
8189 mask |= stack_top_dies;
8191 gcc_assert (mask < 16);
8200 ix86_output_addr_vec_elt (FILE *file, int value)
8202 const char *directive = ASM_LONG;
8206 directive = ASM_QUAD;
8208 gcc_assert (!TARGET_64BIT);
8211 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
8215 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
8218 fprintf (file, "%s%s%d-%s%d\n",
8219 ASM_LONG, LPREFIX, value, LPREFIX, rel);
8220 else if (HAVE_AS_GOTOFF_IN_DATA)
8221 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
8223 else if (TARGET_MACHO)
8225 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
8226 machopic_output_function_base_name (file);
8227 fprintf (file, "\n");
8231 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
8232 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
8235 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
8239 ix86_expand_clear (rtx dest)
8243 /* We play register width games, which are only valid after reload. */
8244 gcc_assert (reload_completed);
8246 /* Avoid HImode and its attendant prefix byte. */
8247 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
8248 dest = gen_rtx_REG (SImode, REGNO (dest));
8250 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
8252 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
8253 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
8255 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
8256 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
8262 /* X is an unchanging MEM. If it is a constant pool reference, return
8263 the constant pool rtx, else NULL. */
8266 maybe_get_pool_constant (rtx x)
8268 x = ix86_delegitimize_address (XEXP (x, 0));
8270 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
8271 return get_pool_constant (x);
8277 ix86_expand_move (enum machine_mode mode, rtx operands[])
8279 int strict = (reload_in_progress || reload_completed);
8281 enum tls_model model;
8286 if (GET_CODE (op1) == SYMBOL_REF)
8288 model = SYMBOL_REF_TLS_MODEL (op1);
8291 op1 = legitimize_tls_address (op1, model, true);
8292 op1 = force_operand (op1, op0);
8297 else if (GET_CODE (op1) == CONST
8298 && GET_CODE (XEXP (op1, 0)) == PLUS
8299 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
8301 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
8304 rtx addend = XEXP (XEXP (op1, 0), 1);
8305 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
8306 op1 = force_operand (op1, NULL);
8307 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
8308 op0, 1, OPTAB_DIRECT);
8314 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
8319 rtx temp = ((reload_in_progress
8320 || ((op0 && GET_CODE (op0) == REG)
8322 ? op0 : gen_reg_rtx (Pmode));
8323 op1 = machopic_indirect_data_reference (op1, temp);
8324 op1 = machopic_legitimize_pic_address (op1, mode,
8325 temp == op1 ? 0 : temp);
8327 else if (MACHOPIC_INDIRECT)
8328 op1 = machopic_indirect_data_reference (op1, 0);
8332 if (GET_CODE (op0) == MEM)
8333 op1 = force_reg (Pmode, op1);
8335 op1 = legitimize_address (op1, op1, Pmode);
8336 #endif /* TARGET_MACHO */
8340 if (GET_CODE (op0) == MEM
8341 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
8342 || !push_operand (op0, mode))
8343 && GET_CODE (op1) == MEM)
8344 op1 = force_reg (mode, op1);
8346 if (push_operand (op0, mode)
8347 && ! general_no_elim_operand (op1, mode))
8348 op1 = copy_to_mode_reg (mode, op1);
8350 /* Force large constants in 64-bit compilation into a register
8351 to get them CSEd. */
8352 if (TARGET_64BIT && mode == DImode
8353 && immediate_operand (op1, mode)
8354 && !x86_64_zext_immediate_operand (op1, VOIDmode)
8355 && !register_operand (op0, mode)
8356 && optimize && !reload_completed && !reload_in_progress)
8357 op1 = copy_to_mode_reg (mode, op1);
8359 if (FLOAT_MODE_P (mode))
8361 /* If we are loading a floating-point constant into a register,
8362 force the value to memory now, since we'll get better code
8363 out of the back end. */
8367 else if (GET_CODE (op1) == CONST_DOUBLE)
8369 op1 = validize_mem (force_const_mem (mode, op1));
8370 if (!register_operand (op0, mode))
8372 rtx temp = gen_reg_rtx (mode);
8373 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
8374 emit_move_insn (op0, temp);
8381 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8385 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
8387 rtx op0 = operands[0], op1 = operands[1];
8389 /* Force constants other than zero into memory. We do not know how
8390 the instructions used to build constants modify the upper 64 bits
8391 of the register; once we have that information we may be able
8392 to handle some of them more efficiently. */
8393 if ((reload_in_progress | reload_completed) == 0
8394 && register_operand (op0, mode)
8395 && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
8396 op1 = validize_mem (force_const_mem (mode, op1));
8398 /* Make operand1 a register if it isn't already. */
8400 && !register_operand (op0, mode)
8401 && !register_operand (op1, mode))
8403 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
8407 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8410 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
8411 straight to ix86_expand_vector_move. */
8414 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
8423 /* If we're optimizing for size, movups is the smallest. */
8426 op0 = gen_lowpart (V4SFmode, op0);
8427 op1 = gen_lowpart (V4SFmode, op1);
8428 emit_insn (gen_sse_movups (op0, op1));
8432 /* ??? If we have typed data, then it would appear that using
8433 movdqu is the only way to get unaligned data loaded with integer type. */
8435 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8437 op0 = gen_lowpart (V16QImode, op0);
8438 op1 = gen_lowpart (V16QImode, op1);
8439 emit_insn (gen_sse2_movdqu (op0, op1));
8443 if (TARGET_SSE2 && mode == V2DFmode)
8447 /* When SSE registers are split into halves, we can avoid
8448 writing to the top half twice. */
8449 if (TARGET_SSE_SPLIT_REGS)
8451 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8456 /* ??? Not sure about the best option for the Intel chips.
8457 The following would seem to satisfy; the register is
8458 entirely cleared, breaking the dependency chain. We
8459 then store to the upper half, with a dependency depth
8460 of one. A rumor has it that Intel recommends two movsd
8461 followed by an unpacklpd, but this is unconfirmed. And
8462 given that the dependency depth of the unpacklpd would
8463 still be one, I'm not sure why this would be better. */
8464 zero = CONST0_RTX (V2DFmode);
8467 m = adjust_address (op1, DFmode, 0);
8468 emit_insn (gen_sse2_loadlpd (op0, zero, m));
8469 m = adjust_address (op1, DFmode, 8);
8470 emit_insn (gen_sse2_loadhpd (op0, op0, m));
8474 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
8475 emit_move_insn (op0, CONST0_RTX (mode));
8477 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8479 if (mode != V4SFmode)
8480 op0 = gen_lowpart (V4SFmode, op0);
8481 m = adjust_address (op1, V2SFmode, 0);
8482 emit_insn (gen_sse_loadlps (op0, op0, m));
8483 m = adjust_address (op1, V2SFmode, 8);
8484 emit_insn (gen_sse_loadhps (op0, op0, m));
8487 else if (MEM_P (op0))
8489 /* If we're optimizing for size, movups is the smallest. */
8492 op0 = gen_lowpart (V4SFmode, op0);
8493 op1 = gen_lowpart (V4SFmode, op1);
8494 emit_insn (gen_sse_movups (op0, op1));
8498 /* ??? Similar to above, only less clear because of quote
8499 typeless stores unquote. */
8500 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
8501 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8503 op0 = gen_lowpart (V16QImode, op0);
8504 op1 = gen_lowpart (V16QImode, op1);
8505 emit_insn (gen_sse2_movdqu (op0, op1));
8509 if (TARGET_SSE2 && mode == V2DFmode)
8511 m = adjust_address (op0, DFmode, 0);
8512 emit_insn (gen_sse2_storelpd (m, op1));
8513 m = adjust_address (op0, DFmode, 8);
8514 emit_insn (gen_sse2_storehpd (m, op1));
8518 if (mode != V4SFmode)
8519 op1 = gen_lowpart (V4SFmode, op1);
8520 m = adjust_address (op0, V2SFmode, 0);
8521 emit_insn (gen_sse_storelps (m, op1));
8522 m = adjust_address (op0, V2SFmode, 8);
8523 emit_insn (gen_sse_storehps (m, op1));
8530 /* Expand a push in MODE. This is some mode for which we do not support
8531 proper push instructions, at least from the registers that we expect
8532 the value to live in. */
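/* The expansion is simply an explicit stack-pointer adjustment
   followed by a store; roughly, for 32-bit,
       subl $N, %esp
       mov<size> x, (%esp)
   where N is GET_MODE_SIZE (mode).  */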
8535 ix86_expand_push (enum machine_mode mode, rtx x)
8539 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
8540 GEN_INT (-GET_MODE_SIZE (mode)),
8541 stack_pointer_rtx, 1, OPTAB_DIRECT);
8542 if (tmp != stack_pointer_rtx)
8543 emit_move_insn (stack_pointer_rtx, tmp);
8545 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
8546 emit_move_insn (tmp, x);
8549 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
8550 destination to use for the operation. If different from the true
8551 destination in operands[0], a copy operation will be required. */
8554 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
8557 int matching_memory;
8558 rtx src1, src2, dst;
8564 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
8565 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8566 && (rtx_equal_p (dst, src2)
8567 || immediate_operand (src1, mode)))
8574 /* If the destination is memory, and we do not have matching source
8575 operands, do things in registers. */
8576 matching_memory = 0;
8577 if (GET_CODE (dst) == MEM)
8579 if (rtx_equal_p (dst, src1))
8580 matching_memory = 1;
8581 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8582 && rtx_equal_p (dst, src2))
8583 matching_memory = 2;
8585 dst = gen_reg_rtx (mode);
8588 /* The source operands cannot both be in memory. */
8589 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
8591 if (matching_memory != 2)
8592 src2 = force_reg (mode, src2);
8594 src1 = force_reg (mode, src1);
8597 /* If the operation is not commutative, source 1 cannot be a constant
8598 or non-matching memory. */
8599 if ((CONSTANT_P (src1)
8600 || (!matching_memory && GET_CODE (src1) == MEM))
8601 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8602 src1 = force_reg (mode, src1);
8604 src1 = operands[1] = src1;
8605 src2 = operands[2] = src2;
8609 /* Similarly, but assume that the destination has already been set up. */
8613 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
8614 enum machine_mode mode, rtx operands[])
8616 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
8617 gcc_assert (dst == operands[0]);
8620 /* Attempt to expand a binary operator. Make the expansion closer to the
8621 actual machine than just general_operand, which would allow 3 separate
8622 memory references (one output, two input) in a single insn. */
8625 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
8628 rtx src1, src2, dst, op, clob;
8630 dst = ix86_fixup_binary_operands (code, mode, operands);
8634 /* Emit the instruction. */
8636 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
8637 if (reload_in_progress)
8639 /* Reload doesn't know about the flags register, and doesn't know that
8640 it doesn't want to clobber it. We can only do this with PLUS. */
8641 gcc_assert (code == PLUS);
8646 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8647 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8650 /* Fix up the destination if needed. */
8651 if (dst != operands[0])
8652 emit_move_insn (operands[0], dst);
8655 /* Return TRUE or FALSE depending on whether the binary operator meets the
8656 appropriate constraints. */
8659 ix86_binary_operator_ok (enum rtx_code code,
8660 enum machine_mode mode ATTRIBUTE_UNUSED,
8663 /* The source operands cannot both be in memory. */
8664 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
8666 /* If the operation is not commutative, source 1 cannot be a constant. */
8667 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8669 /* If the destination is memory, we must have a matching source operand. */
8670 if (GET_CODE (operands[0]) == MEM
8671 && ! (rtx_equal_p (operands[0], operands[1])
8672 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8673 && rtx_equal_p (operands[0], operands[2]))))
8675 /* If the operation is not commutative and source 1 is memory, we must
8676 have a matching destination. */
8677 if (GET_CODE (operands[1]) == MEM
8678 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
8679 && ! rtx_equal_p (operands[0], operands[1]))
8684 /* Attempt to expand a unary operator. Make the expansion closer to the
8685 actual machine than just general_operand, which would allow 2 separate
8686 memory references (one output, one input) in a single insn. */
8689 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
8692 int matching_memory;
8693 rtx src, dst, op, clob;
8698 /* If the destination is memory, and we do not have matching source
8699 operands, do things in registers. */
8700 matching_memory = 0;
8703 if (rtx_equal_p (dst, src))
8704 matching_memory = 1;
8706 dst = gen_reg_rtx (mode);
8709 /* When the source operand is memory, the destination must match. */
8710 if (MEM_P (src) && !matching_memory)
8711 src = force_reg (mode, src);
8713 /* Emit the instruction. */
8715 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
8716 if (reload_in_progress || code == NOT)
8718 /* Reload doesn't know about the flags register, and doesn't know that
8719 it doesn't want to clobber it. */
8720 gcc_assert (code == NOT);
8725 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8726 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8729 /* Fix up the destination if needed. */
8730 if (dst != operands[0])
8731 emit_move_insn (operands[0], dst);
8734 /* Return TRUE or FALSE depending on whether the unary operator meets the
8735 appropriate constraints. */
8738 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
8739 enum machine_mode mode ATTRIBUTE_UNUSED,
8740 rtx operands[2] ATTRIBUTE_UNUSED)
8742 /* If one of the operands is memory, source and destination must match. */
8743 if ((GET_CODE (operands[0]) == MEM
8744 || GET_CODE (operands[1]) == MEM)
8745 && ! rtx_equal_p (operands[0], operands[1]))
8750 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
8751 Create a mask for the sign bit in MODE for an SSE register. If VECT is
8752 true, then replicate the mask for all elements of the vector register.
8753 If INVERT is true, then create a mask excluding the sign bit. */
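/* E.g. for SFmode the mask word is 0x80000000: AND with the inverted
   mask clears the sign bit (fabs), while XOR with the mask flips it
   (neg), as ix86_expand_fp_absneg_operator does below.  */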
8756 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
8758 enum machine_mode vec_mode;
8759 HOST_WIDE_INT hi, lo;
8764 /* Find the sign bit, sign extended to 2*HWI. */
8766 lo = 0x80000000, hi = lo < 0;
8767 else if (HOST_BITS_PER_WIDE_INT >= 64)
8768 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
8770 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
8775 /* Force this value into the low part of a fp vector constant. */
8776 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
8777 mask = gen_lowpart (mode, mask);
8782 v = gen_rtvec (4, mask, mask, mask, mask);
8784 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
8785 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8786 vec_mode = V4SFmode;
8791 v = gen_rtvec (2, mask, mask);
8793 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
8794 vec_mode = V2DFmode;
8797 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
8800 /* Generate code for floating point ABS or NEG. */
8803 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
8806 rtx mask, set, use, clob, dst, src;
8807 bool matching_memory;
8808 bool use_sse = false;
8809 bool vector_mode = VECTOR_MODE_P (mode);
8810 enum machine_mode elt_mode = mode;
8814 elt_mode = GET_MODE_INNER (mode);
8817 else if (TARGET_SSE_MATH)
8818 use_sse = SSE_FLOAT_MODE_P (mode);
8820 /* NEG and ABS performed with SSE use bitwise mask operations.
8821 Create the appropriate mask now. */
8823 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
8826 /* When not using SSE, we don't use the mask, but prefer to keep the
8827 same general form of the insn pattern to reduce duplication when
8828 it comes time to split. */
8835 /* If the destination is memory, and we don't have matching source
8836 operands, do things in registers. */
8837 matching_memory = false;
8840 if (rtx_equal_p (dst, src))
8841 matching_memory = true;
8843 dst = gen_reg_rtx (mode);
8845 if (MEM_P (src) && !matching_memory)
8846 src = force_reg (mode, src);
8850 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
8851 set = gen_rtx_SET (VOIDmode, dst, set);
8856 set = gen_rtx_fmt_e (code, mode, src);
8857 set = gen_rtx_SET (VOIDmode, dst, set);
8858 use = gen_rtx_USE (VOIDmode, mask);
8859 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8860 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));
8863 if (dst != operands[0])
8864 emit_move_insn (operands[0], dst);
8867 /* Expand a copysign operation. Special case operand 0 being a constant. */
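/* The identity being implemented is
     copysign (x, y) = (x & ~SIGNMASK) | (y & SIGNMASK)
   computed in an SSE register, with SIGNMASK built by
   ix86_build_signbit_mask above.  */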
8870 ix86_expand_copysign (rtx operands[])
8872 enum machine_mode mode, vmode;
8873 rtx dest, op0, op1, mask, nmask;
8879 mode = GET_MODE (dest);
8880 vmode = mode == SFmode ? V4SFmode : V2DFmode;
8882 if (GET_CODE (op0) == CONST_DOUBLE)
8886 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
8887 op0 = simplify_unary_operation (ABS, mode, op0, mode);
8889 if (op0 == CONST0_RTX (mode))
8890 op0 = CONST0_RTX (vmode);
8894 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
8895 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8897 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
8898 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
8901 mask = ix86_build_signbit_mask (mode, 0, 0);
8904 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
8906 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
8910 nmask = ix86_build_signbit_mask (mode, 0, 1);
8911 mask = ix86_build_signbit_mask (mode, 0, 0);
8914 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
8916 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
8920 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
8921 be a constant, and so has already been expanded into a vector constant. */
8924 ix86_split_copysign_const (rtx operands[])
8926 enum machine_mode mode, vmode;
8927 rtx dest, op0, op1, mask, x;
8934 mode = GET_MODE (dest);
8935 vmode = GET_MODE (mask);
8937 dest = simplify_gen_subreg (vmode, dest, mode, 0);
8938 x = gen_rtx_AND (vmode, dest, mask);
8939 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8941 if (op0 != CONST0_RTX (vmode))
8943 x = gen_rtx_IOR (vmode, dest, op0);
8944 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8948 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
8949 so we have to do two masks. */
8952 ix86_split_copysign_var (rtx operands[])
8954 enum machine_mode mode, vmode;
8955 rtx dest, scratch, op0, op1, mask, nmask, x;
8958 scratch = operands[1];
8961 nmask = operands[4];
8964 mode = GET_MODE (dest);
8965 vmode = GET_MODE (mask);
8967 if (rtx_equal_p (op0, op1))
8969 /* Shouldn't happen often (it's useless, obviously), but when it does
8970 we'd generate incorrect code if we continue below. */
8971 emit_move_insn (dest, op0);
8975 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
8977 gcc_assert (REGNO (op1) == REGNO (scratch));
8979 x = gen_rtx_AND (vmode, scratch, mask);
8980 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
8983 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
8984 x = gen_rtx_NOT (vmode, dest);
8985 x = gen_rtx_AND (vmode, x, op0);
8986 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8990 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
8992 x = gen_rtx_AND (vmode, scratch, mask);
8994 else /* alternative 2,4 */
8996 gcc_assert (REGNO (mask) == REGNO (scratch));
8997 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
8998 x = gen_rtx_AND (vmode, scratch, op1);
9000 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9002 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
9004 dest = simplify_gen_subreg (vmode, op0, mode, 0);
9005 x = gen_rtx_AND (vmode, dest, nmask);
9007 else /* alternative 3,4 */
9009 gcc_assert (REGNO (nmask) == REGNO (dest));
9011 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9012 x = gen_rtx_AND (vmode, dest, op0);
9014 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9017 x = gen_rtx_IOR (vmode, dest, scratch);
9018 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
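/* All of the register alternatives above reduce to the same data flow,
   roughly

     scratch = op1 & SIGNMASK        (sign of op1)
     dest    = op0 & ~SIGNMASK       (magnitude of op0)
     dest    = dest | scratch

   and differ only in which of mask/nmask shares a register with dest
   or scratch.  */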
9021 /* Return TRUE or FALSE depending on whether the first SET in INSN
9022 has source and destination with matching CC modes, and whether the
9023 CC mode is at least as constrained as REQ_MODE. */
9026 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
9029 enum machine_mode set_mode;
9031 set = PATTERN (insn);
9032 if (GET_CODE (set) == PARALLEL)
9033 set = XVECEXP (set, 0, 0);
9034 gcc_assert (GET_CODE (set) == SET);
9035 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
9037 set_mode = GET_MODE (SET_DEST (set));
9041 if (req_mode != CCNOmode
9042 && (req_mode != CCmode
9043 || XEXP (SET_SRC (set), 1) != const0_rtx))
9047 if (req_mode == CCGCmode)
9051 if (req_mode == CCGOCmode || req_mode == CCNOmode)
9055 if (req_mode == CCZmode)
9065 return (GET_MODE (SET_SRC (set)) == set_mode);
9068 /* Generate insn patterns to do an integer compare of OPERANDS. */
9071 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
9073 enum machine_mode cmpmode;
9076 cmpmode = SELECT_CC_MODE (code, op0, op1);
9077 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
9079 /* This is very simple, but making the interface the same as in the
9080 FP case makes the rest of the code easier. */
9081 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
9082 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
9084 /* Return the test that should be put into the flags user, i.e.
9085 the bcc, scc, or cmov instruction. */
9086 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
9089 /* Figure out whether to use ordered or unordered fp comparisons.
9090 Return the appropriate mode to use. */
9093 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
9095 /* ??? In order to make all comparisons reversible, we do all comparisons
9096 non-trapping when compiling for IEEE. Once gcc is able to distinguish
9097 all forms of trapping and non-trapping comparisons, we can make inequality
9098 comparisons trapping again, since it results in better code when using
9099 FCOM based compares. */
9100 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
9104 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
9106 if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9107 return ix86_fp_compare_mode (code);
9110 /* Only zero flag is needed. */
9112 case NE: /* ZF!=0 */
9114 /* Codes needing carry flag. */
9115 case GEU: /* CF=0 */
9116 case GTU: /* CF=0 & ZF=0 */
9117 case LTU: /* CF=1 */
9118 case LEU: /* CF=1 | ZF=1 */
9120 /* Codes possibly doable only with sign flag when
9121 comparing against zero. */
9122 case GE: /* SF=OF or SF=0 */
9123 case LT: /* SF<>OF or SF=1 */
9124 if (op1 == const0_rtx)
9127 /* For the other cases the carry flag is not required. */
9129 /* Codes doable only with the sign flag when comparing
9130 against zero, but we lack a jump instruction for it,
9131 so we need to use relational tests against overflow,
9132 which thus needs to be zero. */
9133 case GT: /* ZF=0 & SF=OF */
9134 case LE: /* ZF=1 | SF<>OF */
9135 if (op1 == const0_rtx)
9139 /* The strcmp patterns do (use flags) and combine may ask us for a proper mode. */
9148 /* Return the fixed registers used for condition codes. */
9151 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9158 /* If two condition code modes are compatible, return a condition code
9159 mode which is compatible with both. Otherwise, return VOIDmode. */
9162 static enum machine_mode
9163 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
9168 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
9171 if ((m1 == CCGCmode && m2 == CCGOCmode)
9172 || (m1 == CCGOCmode && m2 == CCGCmode))
9200 /* These are only compatible with themselves, which we already checked. */
9206 /* Return true if we should use an FCOMI instruction for this fp comparison. */
9209 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
9211 enum rtx_code swapped_code = swap_condition (code);
9212 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
9213 || (ix86_fp_comparison_cost (swapped_code)
9214 == ix86_fp_comparison_fcomi_cost (swapped_code)));
9217 /* Swap, force into registers, or otherwise massage the two operands
9218 to a fp comparison. The operands are updated in place; the new
9219 comparison code is returned. */
9221 static enum rtx_code
9222 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
9224 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
9225 rtx op0 = *pop0, op1 = *pop1;
9226 enum machine_mode op_mode = GET_MODE (op0);
9227 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
9229 /* All of the unordered compare instructions only work on registers.
9230 The same is true of the fcomi compare instructions. The XFmode
9231 compare instructions require registers except when comparing
9232 against zero or when converting operand 1 from fixed point to floating point. */
9236 && (fpcmp_mode == CCFPUmode
9237 || (op_mode == XFmode
9238 && ! (standard_80387_constant_p (op0) == 1
9239 || standard_80387_constant_p (op1) == 1)
9240 && GET_CODE (op1) != FLOAT)
9241 || ix86_use_fcomi_compare (code)))
9243 op0 = force_reg (op_mode, op0);
9244 op1 = force_reg (op_mode, op1);
9248 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
9249 things around if they appear profitable, otherwise force op0 into a register. */
9252 if (standard_80387_constant_p (op0) == 0
9253 || (GET_CODE (op0) == MEM
9254 && ! (standard_80387_constant_p (op1) == 0
9255 || GET_CODE (op1) == MEM)))
9258 tmp = op0, op0 = op1, op1 = tmp;
9259 code = swap_condition (code);
9262 if (GET_CODE (op0) != REG)
9263 op0 = force_reg (op_mode, op0);
9265 if (CONSTANT_P (op1))
9267 int tmp = standard_80387_constant_p (op1);
9269 op1 = validize_mem (force_const_mem (op_mode, op1));
9273 op1 = force_reg (op_mode, op1);
9276 op1 = force_reg (op_mode, op1);
9280 /* Try to rearrange the comparison to make it cheaper. */
9281 if (ix86_fp_comparison_cost (code)
9282 > ix86_fp_comparison_cost (swap_condition (code))
9283 && (GET_CODE (op1) == REG || !no_new_pseudos))
9286 tmp = op0, op0 = op1, op1 = tmp;
9287 code = swap_condition (code);
9288 if (GET_CODE (op0) != REG)
9289 op0 = force_reg (op_mode, op0);
9297 /* Convert the comparison codes we use to represent FP comparisons to the integer
9298 code that will result in a proper branch. Return UNKNOWN if no such code is available. */
9302 ix86_fp_compare_code_to_integer (enum rtx_code code)
9331 /* Split comparison code CODE into comparisons we can do using branch
9332 instructions. BYPASS_CODE is the comparison code for the branch that will
9333 branch around FIRST_CODE and SECOND_CODE. If one of the branches
9334 is not required, its value is set to UNKNOWN.
9335 We never require more than two branches. */
9338 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
9339 enum rtx_code *first_code,
9340 enum rtx_code *second_code)
9343 *bypass_code = UNKNOWN;
9344 *second_code = UNKNOWN;
9346 /* The fcomi comparison sets flags as follows:
9347 cmp    ZF PF CF
9348 >      0  0  0
9349 <      0  0  1
9350 =      1  0  0
9351 un     1  1  1 */
9356 case GT: /* GTU - CF=0 & ZF=0 */
9357 case GE: /* GEU - CF=0 */
9358 case ORDERED: /* PF=0 */
9359 case UNORDERED: /* PF=1 */
9360 case UNEQ: /* EQ - ZF=1 */
9361 case UNLT: /* LTU - CF=1 */
9362 case UNLE: /* LEU - CF=1 | ZF=1 */
9363 case LTGT: /* EQ - ZF=0 */
9365 case LT: /* LTU - CF=1 - fails on unordered */
9367 *bypass_code = UNORDERED;
9369 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
9371 *bypass_code = UNORDERED;
9373 case EQ: /* EQ - ZF=1 - fails on unordered */
9375 *bypass_code = UNORDERED;
9377 case NE: /* NE - ZF=0 - fails on unordered */
9379 *second_code = UNORDERED;
9381 case UNGE: /* GEU - CF=0 - fails on unordered */
9383 *second_code = UNORDERED;
9385 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
9387 *second_code = UNORDERED;
9392 if (!TARGET_IEEE_FP)
9394 *second_code = UNKNOWN;
9395 *bypass_code = UNKNOWN;
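/* Example of the mapping above: with TARGET_IEEE_FP, LT gets first_code
   LT plus an UNORDERED bypass, i.e. roughly

     jp   bypass        unordered operands skip the real test
     jb   target        CF=1 <=> less than
   bypass:

   while NE instead needs a second branch taken on UNORDERED.  */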
9399 /* Return the cost of a comparison done using fcom + arithmetic operations on AX.
9400 All of the following functions use the number of instructions as the cost metric.
9401 In the future this should be tweaked to compute bytes for optimize_size and to
9402 take into account the performance of various instructions on various CPUs. */
9404 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
9406 if (!TARGET_IEEE_FP)
9408 /* The cost of code output by ix86_expand_fp_compare. */
9436 /* Return cost of comparison done using fcomi operation.
9437 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9439 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
9441 enum rtx_code bypass_code, first_code, second_code;
9442 /* Return an arbitrarily high cost when the instruction is not supported -
9443 this prevents gcc from using it. */
9446 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9447 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
9450 /* Return cost of comparison done using sahf operation.
9451 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9453 ix86_fp_comparison_sahf_cost (enum rtx_code code)
9455 enum rtx_code bypass_code, first_code, second_code;
9456 /* Return an arbitrarily high cost when the instruction is not preferred -
9457 this keeps gcc from using it. */
9458 if (!TARGET_USE_SAHF && !optimize_size)
9460 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9461 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
9464 /* Compute cost of the comparison done using any method.
9465 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9467 ix86_fp_comparison_cost (enum rtx_code code)
9469 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
9472 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
9473 sahf_cost = ix86_fp_comparison_sahf_cost (code);
9475 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
9476 if (min > sahf_cost)
9478 if (min > fcomi_cost)
9483 /* Generate insn patterns to do a floating point compare of OPERANDS. */
9486 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
9487 rtx *second_test, rtx *bypass_test)
9489 enum machine_mode fpcmp_mode, intcmp_mode;
9491 int cost = ix86_fp_comparison_cost (code);
9492 enum rtx_code bypass_code, first_code, second_code;
9494 fpcmp_mode = ix86_fp_compare_mode (code);
9495 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
9498 *second_test = NULL_RTX;
9500 *bypass_test = NULL_RTX;
9502 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9504 /* Do fcomi/sahf based test when profitable. */
9505 if ((bypass_code == UNKNOWN || bypass_test)
9506 && (second_code == UNKNOWN || second_test)
9507 && ix86_fp_comparison_arithmetics_cost (code) > cost)
9511 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9512 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
9518 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9519 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9521 scratch = gen_reg_rtx (HImode);
9522 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9523 emit_insn (gen_x86_sahf_1 (scratch));
9526 /* The FP codes work out to act like unsigned. */
9527 intcmp_mode = fpcmp_mode;
9529 if (bypass_code != UNKNOWN)
9530 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
9531 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9533 if (second_code != UNKNOWN)
9534 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
9535 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9540 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
9541 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9542 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9544 scratch = gen_reg_rtx (HImode);
9545 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9547 /* In the unordered case, we have to check C2 for NaNs, which
9548 doesn't happen to work out to anything nice combination-wise.
9549 So do some bit twiddling on the value we've got in AH to come
9550 up with an appropriate set of condition codes. */
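/* After fnstsw, AH holds the x87 condition bits: C0 at 0x01, C2 at
   0x04 and C3 at 0x40.  The 0x45 masks used below therefore select
   C3|C2|C0, the bits fcom sets according to the comparison result.  */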
9552 intcmp_mode = CCNOmode;
9557 if (code == GT || !TARGET_IEEE_FP)
9559 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9564 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9565 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9566 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
9567 intcmp_mode = CCmode;
9573 if (code == LT && TARGET_IEEE_FP)
9575 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9576 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
9577 intcmp_mode = CCmode;
9582 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
9588 if (code == GE || !TARGET_IEEE_FP)
9590 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
9595 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9596 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9603 if (code == LE && TARGET_IEEE_FP)
9605 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9606 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9607 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9608 intcmp_mode = CCmode;
9613 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9619 if (code == EQ && TARGET_IEEE_FP)
9621 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9622 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9623 intcmp_mode = CCmode;
9628 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9635 if (code == NE && TARGET_IEEE_FP)
9637 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9638 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9644 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9650 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9654 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9663 /* Return the test that should be put into the flags user, i.e.
9664 the bcc, scc, or cmov instruction. */
9665 return gen_rtx_fmt_ee (code, VOIDmode,
9666 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9671 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
9674 op0 = ix86_compare_op0;
9675 op1 = ix86_compare_op1;
9678 *second_test = NULL_RTX;
9680 *bypass_test = NULL_RTX;
9682 if (ix86_compare_emitted)
9684 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
9685 ix86_compare_emitted = NULL_RTX;
9687 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9688 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9689 second_test, bypass_test);
9691 ret = ix86_expand_int_compare (code, op0, op1);
9696 /* Return true if the CODE will result in a nontrivial jump sequence. */
9698 ix86_fp_jump_nontrivial_p (enum rtx_code code)
9700 enum rtx_code bypass_code, first_code, second_code;
9703 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9704 return bypass_code != UNKNOWN || second_code != UNKNOWN;
9708 ix86_expand_branch (enum rtx_code code, rtx label)
9712 switch (GET_MODE (ix86_compare_op0))
9718 tmp = ix86_expand_compare (code, NULL, NULL);
9719 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9720 gen_rtx_LABEL_REF (VOIDmode, label),
9722 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
9731 enum rtx_code bypass_code, first_code, second_code;
9733 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
9736 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9738 /* Check whether we will use the natural sequence with one jump. If
9739 so, we can expand the jump early. Otherwise delay expansion by
9740 creating a compound insn so as not to confuse the optimizers. */
9741 if (bypass_code == UNKNOWN && second_code == UNKNOWN
9744 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
9745 gen_rtx_LABEL_REF (VOIDmode, label),
9746 pc_rtx, NULL_RTX, NULL_RTX);
9750 tmp = gen_rtx_fmt_ee (code, VOIDmode,
9751 ix86_compare_op0, ix86_compare_op1);
9752 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9753 gen_rtx_LABEL_REF (VOIDmode, label),
9755 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
9757 use_fcomi = ix86_use_fcomi_compare (code);
9758 vec = rtvec_alloc (3 + !use_fcomi);
9759 RTVEC_ELT (vec, 0) = tmp;
9761 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
9763 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
9766 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
9768 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
9777 /* Expand DImode branch into multiple compare+branch. */
9779 rtx lo[2], hi[2], label2;
9780 enum rtx_code code1, code2, code3;
9781 enum machine_mode submode;
9783 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
9785 tmp = ix86_compare_op0;
9786 ix86_compare_op0 = ix86_compare_op1;
9787 ix86_compare_op1 = tmp;
9788 code = swap_condition (code);
9790 if (GET_MODE (ix86_compare_op0) == DImode)
9792 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
9793 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
9798 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
9799 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
9803 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
9804 avoid two branches. This costs one extra insn, so disable when
9805 optimizing for size. */
9807 if ((code == EQ || code == NE)
9809 || hi[1] == const0_rtx || lo[1] == const0_rtx))
9814 if (hi[1] != const0_rtx)
9815 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
9816 NULL_RTX, 0, OPTAB_WIDEN);
9819 if (lo[1] != const0_rtx)
9820 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
9821 NULL_RTX, 0, OPTAB_WIDEN);
9823 tmp = expand_binop (submode, ior_optab, xor1, xor0,
9824 NULL_RTX, 0, OPTAB_WIDEN);
9826 ix86_compare_op0 = tmp;
9827 ix86_compare_op1 = const0_rtx;
9828 ix86_expand_branch (code, label);
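/* For example, a DImode "a == b" on a 32-bit target becomes roughly

     xorl  hi(b), hi(a)
     xorl  lo(b), lo(a)
     orl   hi(a), lo(a)
     jz    target

   i.e. one compare-and-branch instead of two.  */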
9832 /* Otherwise, if we are doing a less-than or greater-or-equal-than
9833 comparison, op1 is a constant, and the low word is zero, then we can
9834 just examine the high word. */
9836 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
9839 case LT: case LTU: case GE: case GEU:
9840 ix86_compare_op0 = hi[0];
9841 ix86_compare_op1 = hi[1];
9842 ix86_expand_branch (code, label);
9848 /* Otherwise, we need two or three jumps. */
9850 label2 = gen_label_rtx ();
9853 code2 = swap_condition (code);
9854 code3 = unsigned_condition (code);
9858 case LT: case GT: case LTU: case GTU:
9861 case LE: code1 = LT; code2 = GT; break;
9862 case GE: code1 = GT; code2 = LT; break;
9863 case LEU: code1 = LTU; code2 = GTU; break;
9864 case GEU: code1 = GTU; code2 = LTU; break;
9866 case EQ: code1 = UNKNOWN; code2 = NE; break;
9867 case NE: code2 = UNKNOWN; break;
9875 * if (hi(a) < hi(b)) goto true;
9876 * if (hi(a) > hi(b)) goto false;
9877 * if (lo(a) < lo(b)) goto true;
9881 ix86_compare_op0 = hi[0];
9882 ix86_compare_op1 = hi[1];
9884 if (code1 != UNKNOWN)
9885 ix86_expand_branch (code1, label);
9886 if (code2 != UNKNOWN)
9887 ix86_expand_branch (code2, label2);
9889 ix86_compare_op0 = lo[0];
9890 ix86_compare_op1 = lo[1];
9891 ix86_expand_branch (code3, label);
9893 if (code2 != UNKNOWN)
9894 emit_label (label2);
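/* Concretely, a signed double-word "a < b" comes out roughly as

     cmpl  hi(b), hi(a)
     jl    target
     jg    done
     cmpl  lo(b), lo(a)
     jb    target
   done:

   matching the pseudo code in the comment above.  */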
9903 /* Split branch based on floating point condition. */
9905 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
9906 rtx target1, rtx target2, rtx tmp, rtx pushed)
9909 rtx label = NULL_RTX;
9911 int bypass_probability = -1, second_probability = -1, probability = -1;
9914 if (target2 != pc_rtx)
9917 code = reverse_condition_maybe_unordered (code);
9922 condition = ix86_expand_fp_compare (code, op1, op2,
9923 tmp, &second, &bypass);
9925 /* Remove pushed operand from stack. */
9927 ix86_free_from_memory (GET_MODE (pushed));
9929 if (split_branch_probability >= 0)
9931 /* Distribute the probabilities across the jumps.
9932 Assume the BYPASS and SECOND tests are always for UNORDERED. */
9934 probability = split_branch_probability;
9936 /* A value of 1 is low enough that there is no need for the probability
9937 to be updated. Later we may run some experiments and see
9938 whether unordered values are more frequent in practice. */
9940 bypass_probability = 1;
9942 second_probability = 1;
9944 if (bypass != NULL_RTX)
9946 label = gen_label_rtx ();
9947 i = emit_jump_insn (gen_rtx_SET
9949 gen_rtx_IF_THEN_ELSE (VOIDmode,
9951 gen_rtx_LABEL_REF (VOIDmode,
9954 if (bypass_probability >= 0)
9956 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9957 GEN_INT (bypass_probability),
9960 i = emit_jump_insn (gen_rtx_SET
9962 gen_rtx_IF_THEN_ELSE (VOIDmode,
9963 condition, target1, target2)));
9964 if (probability >= 0)
9966 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9967 GEN_INT (probability),
9969 if (second != NULL_RTX)
9971 i = emit_jump_insn (gen_rtx_SET
9973 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
9975 if (second_probability >= 0)
9977 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9978 GEN_INT (second_probability),
9981 if (label != NULL_RTX)
9986 ix86_expand_setcc (enum rtx_code code, rtx dest)
9988 rtx ret, tmp, tmpreg, equiv;
9989 rtx second_test, bypass_test;
9991 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
9992 return 0; /* FAIL */
9994 gcc_assert (GET_MODE (dest) == QImode);
9996 ret = ix86_expand_compare (code, &second_test, &bypass_test);
9997 PUT_MODE (ret, QImode);
10002 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
10003 if (bypass_test || second_test)
10005 rtx test = second_test;
10007 rtx tmp2 = gen_reg_rtx (QImode);
10010 gcc_assert (!second_test);
10011 test = bypass_test;
10013 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
10015 PUT_MODE (test, QImode);
10016 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
10019 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
10021 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
10024 /* Attach a REG_EQUAL note describing the comparison result. */
10025 if (ix86_compare_op0 && ix86_compare_op1)
10027 equiv = simplify_gen_relational (code, QImode,
10028 GET_MODE (ix86_compare_op0),
10029 ix86_compare_op0, ix86_compare_op1);
10030 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
10033 return 1; /* DONE */
10036 /* Expand a comparison setting or clearing the carry flag. Return true when
10037 successful and set *POP for the operation. */
10039 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
10041 enum machine_mode mode =
10042 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
10044 /* Do not handle DImode compares that go through the special path. Also we
10045 can't deal with FP compares yet; it would be possible to add them. */
10046 if (mode == (TARGET_64BIT ? TImode : DImode))
10048 if (FLOAT_MODE_P (mode))
10050 rtx second_test = NULL, bypass_test = NULL;
10051 rtx compare_op, compare_seq;
10053 /* Shortcut: the following common codes never translate into carry flag compares. */
10054 if (code == EQ || code == NE || code == UNEQ || code == LTGT
10055 || code == ORDERED || code == UNORDERED)
10058 /* These comparisons require zero flag; swap operands so they won't. */
10059 if ((code == GT || code == UNLE || code == LE || code == UNGT)
10060 && !TARGET_IEEE_FP)
10065 code = swap_condition (code);
10068 /* Try to expand the comparison and verify that we end up with a carry flag
10069 based comparison. This fails to be true only when we decide to expand the
10070 comparison using arithmetic, which is not a common scenario. */
10072 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10073 &second_test, &bypass_test);
10074 compare_seq = get_insns ();
10077 if (second_test || bypass_test)
10079 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10080 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10081 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
10083 code = GET_CODE (compare_op);
10084 if (code != LTU && code != GEU)
10086 emit_insn (compare_seq);
10090 if (!INTEGRAL_MODE_P (mode))
10098 /* Convert a==0 into (unsigned)a<1. */
10101 if (op1 != const0_rtx)
10104 code = (code == EQ ? LTU : GEU);
10107 /* Convert a>b into b<a or a>=b+1. */
10110 if (GET_CODE (op1) == CONST_INT)
10112 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
10113 /* Bail out on overflow. We still can swap operands but that
10114 would force loading of the constant into register. */
10115 if (op1 == const0_rtx
10116 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
10118 code = (code == GTU ? GEU : LTU);
10125 code = (code == GTU ? LTU : GEU);
10129 /* Convert a>=0 into (unsigned)a<0x80000000. */
10132 if (mode == DImode || op1 != const0_rtx)
10134 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10135 code = (code == LT ? GEU : LTU);
10139 if (mode == DImode || op1 != constm1_rtx)
10141 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10142 code = (code == LE ? GEU : LTU);
10148 /* Swapping operands may cause the constant to appear as the first operand. */
10149 if (!nonimmediate_operand (op0, VOIDmode))
10151 if (no_new_pseudos)
10153 op0 = force_reg (mode, op0);
10155 ix86_compare_op0 = op0;
10156 ix86_compare_op1 = op1;
10157 *pop = ix86_expand_compare (code, NULL, NULL);
10158 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
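/* Example of the conversions above: "a == 0" is rewritten as the
   unsigned test "a < 1", i.e. "cmp $1, a", after which the carry flag
   is set exactly when a is zero and can feed the adc/sbb sequences
   built by the callers.  */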
10163 ix86_expand_int_movcc (rtx operands[])
10165 enum rtx_code code = GET_CODE (operands[1]), compare_code;
10166 rtx compare_seq, compare_op;
10167 rtx second_test, bypass_test;
10168 enum machine_mode mode = GET_MODE (operands[0]);
10169 bool sign_bit_compare_p = false;
10172 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10173 compare_seq = get_insns ();
10176 compare_code = GET_CODE (compare_op);
10178 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
10179 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
10180 sign_bit_compare_p = true;
10182 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
10183 HImode insns, we'd be swallowed in word prefix ops. */
10185 if ((mode != HImode || TARGET_FAST_PREFIX)
10186 && (mode != (TARGET_64BIT ? TImode : DImode))
10187 && GET_CODE (operands[2]) == CONST_INT
10188 && GET_CODE (operands[3]) == CONST_INT)
10190 rtx out = operands[0];
10191 HOST_WIDE_INT ct = INTVAL (operands[2]);
10192 HOST_WIDE_INT cf = INTVAL (operands[3]);
10193 HOST_WIDE_INT diff;
10196 /* Sign bit compares are better done using shifts than by using sbb. */
10198 if (sign_bit_compare_p
10199 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
10200 ix86_compare_op1, &compare_op))
10202 /* Detect overlap between destination and compare sources. */
10205 if (!sign_bit_compare_p)
10207 bool fpcmp = false;
10209 compare_code = GET_CODE (compare_op);
10211 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10212 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10215 compare_code = ix86_fp_compare_code_to_integer (compare_code);
10218 /* To simplify the rest of the code, restrict to the GEU case. */
10219 if (compare_code == LTU)
10221 HOST_WIDE_INT tmp = ct;
10224 compare_code = reverse_condition (compare_code);
10225 code = reverse_condition (code);
10230 PUT_CODE (compare_op,
10231 reverse_condition_maybe_unordered
10232 (GET_CODE (compare_op)));
10234 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
10238 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
10239 || reg_overlap_mentioned_p (out, ix86_compare_op1))
10240 tmp = gen_reg_rtx (mode);
10242 if (mode == DImode)
10243 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
10245 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
10249 if (code == GT || code == GE)
10250 code = reverse_condition (code);
10253 HOST_WIDE_INT tmp = ct;
10258 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
10259 ix86_compare_op1, VOIDmode, 0, -1);
10272 tmp = expand_simple_binop (mode, PLUS,
10274 copy_rtx (tmp), 1, OPTAB_DIRECT);
10285 tmp = expand_simple_binop (mode, IOR,
10287 copy_rtx (tmp), 1, OPTAB_DIRECT);
10289 else if (diff == -1 && ct)
10299 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10301 tmp = expand_simple_binop (mode, PLUS,
10302 copy_rtx (tmp), GEN_INT (cf),
10303 copy_rtx (tmp), 1, OPTAB_DIRECT);
10311 * andl cf - ct, dest
10321 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10324 tmp = expand_simple_binop (mode, AND,
10326 gen_int_mode (cf - ct, mode),
10327 copy_rtx (tmp), 1, OPTAB_DIRECT);
10329 tmp = expand_simple_binop (mode, PLUS,
10330 copy_rtx (tmp), GEN_INT (ct),
10331 copy_rtx (tmp), 1, OPTAB_DIRECT);
10334 if (!rtx_equal_p (tmp, out))
10335 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
10337 return 1; /* DONE */
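/* The flag-based sequence built here is roughly

     cmp   op1, op0
     sbb   dest, dest            dest = CF ? -1 : 0
     and   $(cf - ct), dest
     add   $ct, dest

   with the NOT/AND/ADD steps dropped or folded whenever ct, cf or
   their difference make them trivial.  */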
10343 tmp = ct, ct = cf, cf = tmp;
10345 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10347 /* We may be reversing an unordered compare to a normal compare, which
10348 is not valid in general (we may convert a non-trapping condition
10349 to a trapping one); however, on i386 we currently emit all
10350 comparisons unordered. */
10351 compare_code = reverse_condition_maybe_unordered (compare_code);
10352 code = reverse_condition_maybe_unordered (code);
10356 compare_code = reverse_condition (compare_code);
10357 code = reverse_condition (code);
10361 compare_code = UNKNOWN;
10362 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
10363 && GET_CODE (ix86_compare_op1) == CONST_INT)
10365 if (ix86_compare_op1 == const0_rtx
10366 && (code == LT || code == GE))
10367 compare_code = code;
10368 else if (ix86_compare_op1 == constm1_rtx)
10372 else if (code == GT)
10377 /* Optimize dest = (op0 < 0) ? -1 : cf. */
10378 if (compare_code != UNKNOWN
10379 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
10380 && (cf == -1 || ct == -1))
10382 /* If the lea code below could be used, only optimize
10383 if it results in a 2-insn sequence. */
10385 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
10386 || diff == 3 || diff == 5 || diff == 9)
10387 || (compare_code == LT && ct == -1)
10388 || (compare_code == GE && cf == -1))
10391 * notl op1 (if necessary)
10399 code = reverse_condition (code);
10402 out = emit_store_flag (out, code, ix86_compare_op0,
10403 ix86_compare_op1, VOIDmode, 0, -1);
10405 out = expand_simple_binop (mode, IOR,
10407 out, 1, OPTAB_DIRECT);
10408 if (out != operands[0])
10409 emit_move_insn (operands[0], out);
10411 return 1; /* DONE */
10416 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
10417 || diff == 3 || diff == 5 || diff == 9)
10418 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
10420 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
10426 * lea cf(dest*(ct-cf)),dest
10430 * This also catches the degenerate setcc-only case.
10436 out = emit_store_flag (out, code, ix86_compare_op0,
10437 ix86_compare_op1, VOIDmode, 0, 1);
10440 /* On x86_64 the lea instruction operates on Pmode, so we need
10441 to get the arithmetic done in the proper mode to match. */
10443 tmp = copy_rtx (out);
10447 out1 = copy_rtx (out);
10448 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
10452 tmp = gen_rtx_PLUS (mode, tmp, out1);
10458 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
10461 if (!rtx_equal_p (tmp, out))
10464 out = force_operand (tmp, copy_rtx (out));
10466 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
10468 if (!rtx_equal_p (out, operands[0]))
10469 emit_move_insn (operands[0], copy_rtx (out));
10471 return 1; /* DONE */
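/* Example with ct = 5, cf = 1, so diff = 4: the path above yields
   roughly

     setcc dest                  dest = 0 or 1
     lea   1(,dest,4), dest      dest = cf + dest * diff = 1 or 5

   which is why only diff values expressible in an address
   (1, 2, 3, 4, 5, 8, 9) are accepted.  */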
10475 * General case: Jumpful:
10476 * xorl dest,dest cmpl op1, op2
10477 * cmpl op1, op2 movl ct, dest
10478 * setcc dest jcc 1f
10479 * decl dest movl cf, dest
10480 * andl (cf-ct),dest 1:
10483 * Size 20. Size 14.
10485 * This is reasonably steep, but branch mispredict costs are
10486 * high on modern cpus, so consider failing only if optimizing for size. */
10490 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10491 && BRANCH_COST >= 2)
10497 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10498 /* We may be reversing an unordered compare to a normal compare,
10499 which is not valid in general (we may convert a non-trapping
10500 condition to a trapping one); however, on i386 we currently
10501 emit all comparisons unordered. */
10502 code = reverse_condition_maybe_unordered (code);
10505 code = reverse_condition (code);
10506 if (compare_code != UNKNOWN)
10507 compare_code = reverse_condition (compare_code);
10511 if (compare_code != UNKNOWN)
10513 /* notl op1 (if needed)
10518 For x < 0 (resp. x <= -1) there will be no notl,
10519 so if possible swap the constants to get rid of the complement.
10521 True/false will be -1/0 while code below (store flag
10522 followed by decrement) is 0/-1, so the constants need
10523 to be exchanged once more. */
10525 if (compare_code == GE || !cf)
10527 code = reverse_condition (code);
10532 HOST_WIDE_INT tmp = cf;
10537 out = emit_store_flag (out, code, ix86_compare_op0,
10538 ix86_compare_op1, VOIDmode, 0, -1);
10542 out = emit_store_flag (out, code, ix86_compare_op0,
10543 ix86_compare_op1, VOIDmode, 0, 1);
10545 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
10546 copy_rtx (out), 1, OPTAB_DIRECT);
10549 out = expand_simple_binop (mode, AND, copy_rtx (out),
10550 gen_int_mode (cf - ct, mode),
10551 copy_rtx (out), 1, OPTAB_DIRECT);
10553 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
10554 copy_rtx (out), 1, OPTAB_DIRECT);
10555 if (!rtx_equal_p (out, operands[0]))
10556 emit_move_insn (operands[0], copy_rtx (out));
10558 return 1; /* DONE */
10562 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10564 /* Try a few more things with specific constants and a variable. */
10567 rtx var, orig_out, out, tmp;
10569 if (BRANCH_COST <= 2)
10570 return 0; /* FAIL */
10572 /* If one of the two operands is an interesting constant, load a
10573 constant with the above and mask it in with a logical operation. */
10575 if (GET_CODE (operands[2]) == CONST_INT)
10578 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
10579 operands[3] = constm1_rtx, op = and_optab;
10580 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
10581 operands[3] = const0_rtx, op = ior_optab;
10583 return 0; /* FAIL */
10585 else if (GET_CODE (operands[3]) == CONST_INT)
10588 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
10589 operands[2] = constm1_rtx, op = and_optab;
10590 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
10591 operands[2] = const0_rtx, op = ior_optab;
10593 return 0; /* FAIL */
10596 return 0; /* FAIL */
10598 orig_out = operands[0];
10599 tmp = gen_reg_rtx (mode);
10602 /* Recurse to get the constant loaded. */
10603 if (ix86_expand_int_movcc (operands) == 0)
10604 return 0; /* FAIL */
10606 /* Mask in the interesting variable. */
10607 out = expand_binop (mode, op, var, tmp, orig_out, 0,
10609 if (!rtx_equal_p (out, orig_out))
10610 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
10612 return 1; /* DONE */
10616 * For comparison with above,
10626 if (! nonimmediate_operand (operands[2], mode))
10627 operands[2] = force_reg (mode, operands[2]);
10628 if (! nonimmediate_operand (operands[3], mode))
10629 operands[3] = force_reg (mode, operands[3]);
10631 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10633 rtx tmp = gen_reg_rtx (mode);
10634 emit_move_insn (tmp, operands[3]);
10637 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10639 rtx tmp = gen_reg_rtx (mode);
10640 emit_move_insn (tmp, operands[2]);
10644 if (! register_operand (operands[2], VOIDmode)
10646 || ! register_operand (operands[3], VOIDmode)))
10647 operands[2] = force_reg (mode, operands[2]);
10650 && ! register_operand (operands[3], VOIDmode))
10651 operands[3] = force_reg (mode, operands[3]);
10653 emit_insn (compare_seq);
10654 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10655 gen_rtx_IF_THEN_ELSE (mode,
10656 compare_op, operands[2],
10659 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10660 gen_rtx_IF_THEN_ELSE (mode,
10662 copy_rtx (operands[3]),
10663 copy_rtx (operands[0]))));
10665 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10666 gen_rtx_IF_THEN_ELSE (mode,
10668 copy_rtx (operands[2]),
10669 copy_rtx (operands[0]))));
10671 return 1; /* DONE */
10674 /* Swap, force into registers, or otherwise massage the two operands
10675 to an sse comparison with a mask result. Thus we differ a bit from
10676 ix86_prepare_fp_compare_args which expects to produce a flags result.
10678 The DEST operand exists to help determine whether to commute commutative
10679 operators. The POP0/POP1 operands are updated in place. The new
10680 comparison code is returned, or UNKNOWN if not implementable. */
10682 static enum rtx_code
10683 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
10684 rtx *pop0, rtx *pop1)
10692 /* We have no LTGT as an operator. We could implement it with
10693 NE & ORDERED, but this requires an extra temporary. It's
10694 not clear that it's worth it. */
10701 /* These are supported directly. */
10708 /* For commutative operators, try to canonicalize the destination
10709 operand to be first in the comparison - this helps reload to
10710 avoid extra moves. */
10711 if (!dest || !rtx_equal_p (dest, *pop1))
10719 /* These are not supported directly. Swap the comparison operands
10720 to transform into something that is supported. */
10724 code = swap_condition (code);
10728 gcc_unreachable ();
10734 /* Detect conditional moves that exactly match min/max operational
10735 semantics. Note that this is IEEE safe, as long as we don't
10736 interchange the operands.
10738 Returns FALSE if this conditional move doesn't match a MIN/MAX,
10739 and TRUE if the operation is successful and instructions are emitted. */
10742 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
10743 rtx cmp_op1, rtx if_true, rtx if_false)
10745 enum machine_mode mode;
10751 else if (code == UNGE)
10754 if_true = if_false;
10760 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
10762 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
10767 mode = GET_MODE (dest);
10769 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
10770 but MODE may be a vector mode and thus not appropriate. */
10771 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
10773 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
10776 if_true = force_reg (mode, if_true);
10777 v = gen_rtvec (2, if_true, if_false);
10778 tmp = gen_rtx_UNSPEC (mode, v, u);
10782 code = is_min ? SMIN : SMAX;
10783 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
10786 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
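/* The UNSPEC path matters because the SSE min/max instructions are not
   commutative in the presence of NaNs and signed zeros: minss returns
   its second operand when the operands are unordered, so "a < b ? a : b"
   maps onto it directly only with the operand order established above.  */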
10790 /* Expand an sse vector comparison. Return the register with the result. */
10793 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
10794 rtx op_true, rtx op_false)
10796 enum machine_mode mode = GET_MODE (dest);
10799 cmp_op0 = force_reg (mode, cmp_op0);
10800 if (!nonimmediate_operand (cmp_op1, mode))
10801 cmp_op1 = force_reg (mode, cmp_op1);
10804 || reg_overlap_mentioned_p (dest, op_true)
10805 || reg_overlap_mentioned_p (dest, op_false))
10806 dest = gen_reg_rtx (mode);
10808 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
10809 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10814 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
10815 operations. This is used for both scalar and vector conditional moves. */
10818 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
10820 enum machine_mode mode = GET_MODE (dest);
10823 if (op_false == CONST0_RTX (mode))
10825 op_true = force_reg (mode, op_true);
10826 x = gen_rtx_AND (mode, cmp, op_true);
10827 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10829 else if (op_true == CONST0_RTX (mode))
10831 op_false = force_reg (mode, op_false);
10832 x = gen_rtx_NOT (mode, cmp);
10833 x = gen_rtx_AND (mode, x, op_false);
10834 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10838 op_true = force_reg (mode, op_true);
10839 op_false = force_reg (mode, op_false);
10841 t2 = gen_reg_rtx (mode);
10843 t3 = gen_reg_rtx (mode);
10847 x = gen_rtx_AND (mode, op_true, cmp);
10848 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
10850 x = gen_rtx_NOT (mode, cmp);
10851 x = gen_rtx_AND (mode, x, op_false);
10852 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
10854 x = gen_rtx_IOR (mode, t3, t2);
10855 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
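/* I.e. the general case computes the classic mask select

     dest = (cmp & op_true) | (~cmp & op_false)

   while the two special cases above drop whichever side is zero.  */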
10859 /* Expand a floating-point conditional move. Return true if successful. */
10862 ix86_expand_fp_movcc (rtx operands[])
10864 enum machine_mode mode = GET_MODE (operands[0]);
10865 enum rtx_code code = GET_CODE (operands[1]);
10866 rtx tmp, compare_op, second_test, bypass_test;
10868 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
10870 enum machine_mode cmode;
10872 /* Since we have no cmove for sse registers, don't force bad register
10873 allocation just to gain access to it. Deny the movcc when the
10874 comparison mode doesn't match the move mode. */
10875 cmode = GET_MODE (ix86_compare_op0);
10876 if (cmode == VOIDmode)
10877 cmode = GET_MODE (ix86_compare_op1);
10881 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10883 &ix86_compare_op1);
10884 if (code == UNKNOWN)
10887 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
10888 ix86_compare_op1, operands[2],
10892 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
10893 ix86_compare_op1, operands[2], operands[3]);
10894 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
10898 /* The floating point conditional move instructions don't directly
10899 support conditions resulting from a signed integer comparison. */
10901 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10906 if (!fcmov_comparison_operator (compare_op, VOIDmode))
10908 gcc_assert (!second_test && !bypass_test);
10909 tmp = gen_reg_rtx (QImode);
10910 ix86_expand_setcc (code, tmp);
10912 ix86_compare_op0 = tmp;
10913 ix86_compare_op1 = const0_rtx;
10914 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10916 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10918 tmp = gen_reg_rtx (mode);
10919 emit_move_insn (tmp, operands[3]);
10922 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10924 tmp = gen_reg_rtx (mode);
10925 emit_move_insn (tmp, operands[2]);
10929 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10930 gen_rtx_IF_THEN_ELSE (mode, compare_op,
10931 operands[2], operands[3])));
10933 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10934 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
10935 operands[3], operands[0])));
10937 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10938 gen_rtx_IF_THEN_ELSE (mode, second_test,
10939 operands[2], operands[0])));
10944 /* Expand a floating-point vector conditional move; a vcond operation
10945 rather than a movcc operation. */
10948 ix86_expand_fp_vcond (rtx operands[])
10950 enum rtx_code code = GET_CODE (operands[3]);
10953 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10954 &operands[4], &operands[5]);
10955 if (code == UNKNOWN)
10958 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
10959 operands[5], operands[1], operands[2]))
10962 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
10963 operands[1], operands[2]);
10964 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
10968 /* Expand a signed integral vector conditional move. */
10971 ix86_expand_int_vcond (rtx operands[])
10973 enum machine_mode mode = GET_MODE (operands[0]);
10974 enum rtx_code code = GET_CODE (operands[3]);
10975 bool negate = false;
10978 cop0 = operands[4];
10979 cop1 = operands[5];
10981 /* Canonicalize the comparison to EQ, GT, GTU. */
10992 code = reverse_condition (code);
10998 code = reverse_condition (code);
11004 code = swap_condition (code);
11005 x = cop0, cop0 = cop1, cop1 = x;
11009 gcc_unreachable ();
11012 /* Unsigned parallel compare is not supported by the hardware. Play some
11013 tricks to turn this into a signed comparison against 0. */
11022 /* Perform a parallel modulo subtraction. */
11023 t1 = gen_reg_rtx (mode);
11024 emit_insn (gen_subv4si3 (t1, cop0, cop1));
11026 /* Extract the original sign bit of op0. */
11027 mask = GEN_INT (-0x80000000);
11028 mask = gen_rtx_CONST_VECTOR (mode,
11029 gen_rtvec (4, mask, mask, mask, mask));
11030 mask = force_reg (mode, mask);
11031 t2 = gen_reg_rtx (mode);
11032 emit_insn (gen_andv4si3 (t2, cop0, mask));
11034 /* XOR it back into the result of the subtraction. This results
11035 in the sign bit set iff we saw unsigned underflow. */
11036 x = gen_reg_rtx (mode);
11037 emit_insn (gen_xorv4si3 (x, t1, t2));
11045 /* Perform a parallel unsigned saturating subtraction. */
11046 x = gen_reg_rtx (mode);
11047 emit_insn (gen_rtx_SET (VOIDmode, x,
11048 gen_rtx_US_MINUS (mode, cop0, cop1)));
11055 gcc_unreachable ();
11059 cop1 = CONST0_RTX (mode);
11062 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
11063 operands[1+negate], operands[2-negate]);
11065 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
11066 operands[2-negate]);
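/* For the V16QImode/V8HImode path above, the trick is that an unsigned
   saturating subtraction "a -us b" is zero exactly when a <= b, so
   "a >u b" becomes an EQ-against-zero test with the select arms
   swapped.  */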
11070 /* Expand conditional increment or decrement using adc/sbb instructions.
11071 The default case using setcc followed by the conditional move can be
11072 done by generic code. */
11074 ix86_expand_int_addcc (rtx operands[])
11076 enum rtx_code code = GET_CODE (operands[1]);
11078 rtx val = const0_rtx;
11079 bool fpcmp = false;
11080 enum machine_mode mode = GET_MODE (operands[0]);
11082 if (operands[3] != const1_rtx
11083 && operands[3] != constm1_rtx)
11085 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11086 ix86_compare_op1, &compare_op))
11088 code = GET_CODE (compare_op);
11090 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11091 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11094 code = ix86_fp_compare_code_to_integer (code);
11101 PUT_CODE (compare_op,
11102 reverse_condition_maybe_unordered
11103 (GET_CODE (compare_op)));
11105 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11107 PUT_MODE (compare_op, mode);
11109 /* Construct either adc or sbb insn. */
11110 if ((code == LTU) == (operands[3] == constm1_rtx))
11112 switch (GET_MODE (operands[0]))
11115 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
11118 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
11121 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
11124 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
11127 gcc_unreachable ();
11132 switch (GET_MODE (operands[0]))
11135 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
11138 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
11141 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
11144 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
11147 gcc_unreachable ();
11150 return 1; /* DONE */
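/* E.g. an unsigned "if (a < b) x++;" becomes roughly

     cmp   b, a
     adc   $0, x

   and the mirrored cases use sbb, which is why only an increment or
   decrement by 1 (operands[3] of +/-1) is handled here.  */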
11154 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
11155 works for floating point parameters and non-offsettable memories.
11156 For pushes, it returns just stack offsets; the values will be saved
11157 in the right order. At most three parts are generated. */
11160 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
11165 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
11167 size = (GET_MODE_SIZE (mode) + 4) / 8;
11169 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
11170 gcc_assert (size >= 2 && size <= 3);
11172 /* Optimize constant pool references to immediates. This is used by fp
11173 moves, which force all constants to memory to allow combining. */
11174 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
11176 rtx tmp = maybe_get_pool_constant (operand);
11181 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
11183 /* The only non-offsettable memories we handle are pushes. */
11184 int ok = push_operand (operand, VOIDmode);
11188 operand = copy_rtx (operand);
11189 PUT_MODE (operand, Pmode);
11190 parts[0] = parts[1] = parts[2] = operand;
11194 if (GET_CODE (operand) == CONST_VECTOR)
11196 enum machine_mode imode = int_mode_for_mode (mode);
11197 /* Caution: if we looked through a constant pool memory above,
11198 the operand may actually have a different mode now. That's
11199 ok, since we want to pun this all the way back to an integer. */
11200 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
11201 gcc_assert (operand != NULL);
11207 if (mode == DImode)
11208 split_di (&operand, 1, &parts[0], &parts[1]);
11211 if (REG_P (operand))
11213 gcc_assert (reload_completed);
11214 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
11215 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
11217 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
11219 else if (offsettable_memref_p (operand))
11221 operand = adjust_address (operand, SImode, 0);
11222 parts[0] = operand;
11223 parts[1] = adjust_address (operand, SImode, 4);
11225 parts[2] = adjust_address (operand, SImode, 8);
11227 else if (GET_CODE (operand) == CONST_DOUBLE)
11232 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11236 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
11237 parts[2] = gen_int_mode (l[2], SImode);
11240 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
11243 gcc_unreachable ();
11245 parts[1] = gen_int_mode (l[1], SImode);
11246 parts[0] = gen_int_mode (l[0], SImode);
11249 gcc_unreachable ();
11254 if (mode == TImode)
11255 split_ti (&operand, 1, &parts[0], &parts[1]);
11256 if (mode == XFmode || mode == TFmode)
11258 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
11259 if (REG_P (operand))
11261 gcc_assert (reload_completed);
11262 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
11263 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
11265 else if (offsettable_memref_p (operand))
11267 operand = adjust_address (operand, DImode, 0);
11268 parts[0] = operand;
11269 parts[1] = adjust_address (operand, upper_mode, 8);
11271 else if (GET_CODE (operand) == CONST_DOUBLE)
11276 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11277 real_to_target (l, &r, mode);
11279 /* Do not use a shift by 32, to avoid a warning on 32-bit systems. */
11280 if (HOST_BITS_PER_WIDE_INT >= 64)
11283 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
11284 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
11287 parts[0] = immed_double_const (l[0], l[1], DImode);
11289 if (upper_mode == SImode)
11290 parts[1] = gen_int_mode (l[2], SImode);
11291 else if (HOST_BITS_PER_WIDE_INT >= 64)
11294 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
11295 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
11298 parts[1] = immed_double_const (l[2], l[3], DImode);
11301 gcc_unreachable ();
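/* Examples of the splitting above: after reload, a DFmode register on a
   32-bit target becomes two consecutive SImode hard registers, while an
   XFmode constant becomes three SImode immediates taken from the
   target's long double layout.  */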
11308 /* Emit insns to perform a move or push of DI, DF, and XF values.
11309 Return false when normal moves are needed; true when all required
11310 insns have been emitted. Operands 2-4 contain the input values
11311 in the correct order; operands 5-7 contain the output values. */
11314 ix86_split_long_move (rtx operands[])
11319 int collisions = 0;
11320 enum machine_mode mode = GET_MODE (operands[0]);
11322 /* The DFmode expanders may ask us to move a double.
11323 For a 64bit target this is a single move. By hiding the fact
11324 here we simplify the i386.md splitters. */
11325 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
11327 /* Optimize constant pool references to immediates. This is used by
11328 fp moves, which force all constants to memory to allow combining. */
11330 if (GET_CODE (operands[1]) == MEM
11331 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
11332 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
11333 operands[1] = get_pool_constant (XEXP (operands[1], 0));
11334 if (push_operand (operands[0], VOIDmode))
11336 operands[0] = copy_rtx (operands[0]);
11337 PUT_MODE (operands[0], Pmode);
11340 operands[0] = gen_lowpart (DImode, operands[0]);
11341 operands[1] = gen_lowpart (DImode, operands[1]);
11342 emit_move_insn (operands[0], operands[1]);
11346 /* The only non-offsettable memory we handle is a push. */
11347 if (push_operand (operands[0], VOIDmode))
11350 gcc_assert (GET_CODE (operands[0]) != MEM
11351 || offsettable_memref_p (operands[0]));
11353 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
11354 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
11356 /* When emitting a push, take care of source operands on the stack. */
11357 if (push && GET_CODE (operands[1]) == MEM
11358 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
11361 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
11362 XEXP (part[1][2], 0));
11363 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
11364 XEXP (part[1][1], 0));
11367 /* We need to do the copy in the right order in case an address register
11368 of the source overlaps the destination. */
11369 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
11371 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
11373 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11376 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
11379 /* A collision in the middle part can be handled by reordering. */
11380 if (collisions == 1 && nparts == 3
11381 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11384 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
11385 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
11388 /* If there are more collisions, we can't handle them by reordering.
11389 Do an lea to the last part and use only one colliding move. */
11390 else if (collisions > 1)
11396 base = part[0][nparts - 1];
11398 /* Handle the case when the last part isn't valid for lea.
11399 This happens in 64-bit mode when storing the 12-byte XFmode. */
11400 if (GET_MODE (base) != Pmode)
11401 base = gen_rtx_REG (Pmode, REGNO (base));
11403 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
11404 part[1][0] = replace_equiv_address (part[1][0], base);
11405 part[1][1] = replace_equiv_address (part[1][1],
11406 plus_constant (base, UNITS_PER_WORD));
11408 part[1][2] = replace_equiv_address (part[1][2],
11409 plus_constant (base, 8));
11419 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
11420 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
11421 emit_move_insn (part[0][2], part[1][2]);
11426 /* In 64bit mode we don't have a 32bit push available. In case this is
11427 a register, it is OK - we will just use the larger counterpart. We also
11428 retype the memory - this comes from an attempt to avoid a REX prefix
11429 when moving the second half of a TFmode value. */
11430 if (GET_MODE (part[1][1]) == SImode)
11432 switch (GET_CODE (part[1][1]))
11435 part[1][1] = adjust_address (part[1][1], DImode, 0);
11439 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
11443 gcc_unreachable ();
11446 if (GET_MODE (part[1][0]) == SImode)
11447 part[1][0] = part[1][1];
11450 emit_move_insn (part[0][1], part[1][1]);
11451 emit_move_insn (part[0][0], part[1][0]);
/* Choose the correct order so we do not overwrite the source before it is copied.  */
11456 if ((REG_P (part[0][0])
11457 && REG_P (part[1][1])
11458 && (REGNO (part[0][0]) == REGNO (part[1][1])
11460 && REGNO (part[0][0]) == REGNO (part[1][2]))))
11462 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
11466 operands[2] = part[0][2];
11467 operands[3] = part[0][1];
11468 operands[4] = part[0][0];
11469 operands[5] = part[1][2];
11470 operands[6] = part[1][1];
11471 operands[7] = part[1][0];
11475 operands[2] = part[0][1];
11476 operands[3] = part[0][0];
11477 operands[5] = part[1][1];
11478 operands[6] = part[1][0];
11485 operands[2] = part[0][0];
11486 operands[3] = part[0][1];
11487 operands[4] = part[0][2];
11488 operands[5] = part[1][0];
11489 operands[6] = part[1][1];
11490 operands[7] = part[1][2];
11494 operands[2] = part[0][0];
11495 operands[3] = part[0][1];
11496 operands[5] = part[1][0];
11497 operands[6] = part[1][1];
11501 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
11504 if (GET_CODE (operands[5]) == CONST_INT
11505 && operands[5] != const0_rtx
11506 && REG_P (operands[2]))
11508 if (GET_CODE (operands[6]) == CONST_INT
11509 && INTVAL (operands[6]) == INTVAL (operands[5]))
11510 operands[6] = operands[2];
11513 && GET_CODE (operands[7]) == CONST_INT
11514 && INTVAL (operands[7]) == INTVAL (operands[5]))
11515 operands[7] = operands[2];
11519 && GET_CODE (operands[6]) == CONST_INT
11520 && operands[6] != const0_rtx
11521 && REG_P (operands[3])
11522 && GET_CODE (operands[7]) == CONST_INT
11523 && INTVAL (operands[7]) == INTVAL (operands[6]))
11524 operands[7] = operands[3];
11527 emit_move_insn (operands[2], operands[5]);
11528 emit_move_insn (operands[3], operands[6]);
11530 emit_move_insn (operands[4], operands[7]);
11535 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
11536 left shift by a constant, either using a single shift or
11537 a sequence of add instructions. */
11540 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
11544 emit_insn ((mode == DImode
11546 : gen_adddi3) (operand, operand, operand));
11548 else if (!optimize_size
11549 && count * ix86_cost->add <= ix86_cost->shift_const)
11552 for (i=0; i<count; i++)
11554 emit_insn ((mode == DImode
11556 : gen_adddi3) (operand, operand, operand));
11560 emit_insn ((mode == DImode
11562 : gen_ashldi3) (operand, operand, GEN_INT (count)));
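/* As a sketch in C (assuming, for illustration, add cost 1 and
   constant-shift cost 3), a shift by 2 becomes repeated doubling:
     x += x;    now x == original << 1
     x += x;    now x == original << 2
   which is the add path above, taken when
   count * ix86_cost->add <= ix86_cost->shift_const.  */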
11566 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
11568 rtx low[2], high[2];
11570 const int single_width = mode == DImode ? 32 : 64;
11572 if (GET_CODE (operands[2]) == CONST_INT)
11574 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11575 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11577 if (count >= single_width)
11579 emit_move_insn (high[0], low[1]);
11580 emit_move_insn (low[0], const0_rtx);
11582 if (count > single_width)
11583 ix86_expand_ashl_const (high[0], count - single_width, mode);
11587 if (!rtx_equal_p (operands[0], operands[1]))
11588 emit_move_insn (operands[0], operands[1]);
11589 emit_insn ((mode == DImode
11591 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
11592 ix86_expand_ashl_const (low[0], count, mode);
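/* C equivalent of the constant double-word shift emitted above, with
   w bits per half and shift count c, 0 < c < w (sketch only; high and
   low name the two halves):
     high = (high << c) | (low >> (w - c));    the shld step
     low <<= c;                                ix86_expand_ashl_const  */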
11597 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11599 if (operands[1] == const1_rtx)
/* Assuming we've chosen QImode-capable registers, 1 << N
   can be done with two 32/64-bit shifts, no branches, no cmoves.  */
11603 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
11605 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
11607 ix86_expand_clear (low[0]);
11608 ix86_expand_clear (high[0]);
11609 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
11611 d = gen_lowpart (QImode, low[0]);
11612 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11613 s = gen_rtx_EQ (QImode, flags, const0_rtx);
11614 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11616 d = gen_lowpart (QImode, high[0]);
11617 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11618 s = gen_rtx_NE (QImode, flags, const0_rtx);
11619 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11622 /* Otherwise, we can get the same results by manually performing
11623 a bit extract operation on bit 5/6, and then performing the two
11624 shifts. The two methods of getting 0/1 into low/high are exactly
11625 the same size. Avoiding the shift in the bit extract case helps
11626 pentium4 a bit; no one else seems to care much either way. */
11631 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
11632 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
11634 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
11635 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
11637 emit_insn ((mode == DImode
11639 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
11640 emit_insn ((mode == DImode
11642 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
11643 emit_move_insn (low[0], high[0]);
11644 emit_insn ((mode == DImode
11646 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
11649 emit_insn ((mode == DImode
11651 : gen_ashldi3) (low[0], low[0], operands[2]));
11652 emit_insn ((mode == DImode
11654 : gen_ashldi3) (high[0], high[0], operands[2]));
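/* Sketch of the 1 << N expansion above for the DImode case (w == 32),
   relying on hardware shifts masking their count to w - 1:
     lo = (n & w) == 0;          the setcc pair after the test insn
     hi = (n & w) != 0;
     lo <<= n;  hi <<= n;        counts are taken modulo w
   which forms the double-word value 1 << n without branches.  */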
11658 if (operands[1] == constm1_rtx)
11660 /* For -1 << N, we can avoid the shld instruction, because we
11661 know that we're shifting 0...31/63 ones into a -1. */
11662 emit_move_insn (low[0], constm1_rtx);
11664 emit_move_insn (high[0], low[0]);
11666 emit_move_insn (high[0], constm1_rtx);
11670 if (!rtx_equal_p (operands[0], operands[1]))
11671 emit_move_insn (operands[0], operands[1]);
11673 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11674 emit_insn ((mode == DImode
11676 : gen_x86_64_shld) (high[0], low[0], operands[2]));
emit_insn ((mode == DImode
            ? gen_ashlsi3
            : gen_ashldi3) (low[0], low[0], operands[2]));
11681 if (TARGET_CMOVE && scratch)
11683 ix86_expand_clear (scratch);
11684 emit_insn ((mode == DImode
11685 ? gen_x86_shift_adj_1
11686 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
11689 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
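/* Whether done with cmove or with the branching x86_shift_adj_2
   pattern, the adjustment step above implements
     if (count & w) { high = low; low = 0; }
   completing the variable double-word left shift.  */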
11693 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
11695 rtx low[2], high[2];
11697 const int single_width = mode == DImode ? 32 : 64;
11699 if (GET_CODE (operands[2]) == CONST_INT)
11701 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11702 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11704 if (count == single_width * 2 - 1)
11706 emit_move_insn (high[0], high[1]);
11707 emit_insn ((mode == DImode
11709 : gen_ashrdi3) (high[0], high[0],
11710 GEN_INT (single_width - 1)));
11711 emit_move_insn (low[0], high[0]);
11714 else if (count >= single_width)
11716 emit_move_insn (low[0], high[1]);
11717 emit_move_insn (high[0], low[0]);
11718 emit_insn ((mode == DImode
11720 : gen_ashrdi3) (high[0], high[0],
11721 GEN_INT (single_width - 1)));
11722 if (count > single_width)
11723 emit_insn ((mode == DImode
11725 : gen_ashrdi3) (low[0], low[0],
11726 GEN_INT (count - single_width)));
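/* C equivalent of the count >= w case above (sketch; high_src names
   the original high half, shifts are arithmetic):
     low  = high_src >> (count - w);
     high = high_src >> (w - 1);     sign-fill  */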
11730 if (!rtx_equal_p (operands[0], operands[1]))
11731 emit_move_insn (operands[0], operands[1]);
11732 emit_insn ((mode == DImode
11734 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11735 emit_insn ((mode == DImode
11737 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
11742 if (!rtx_equal_p (operands[0], operands[1]))
11743 emit_move_insn (operands[0], operands[1]);
11745 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11747 emit_insn ((mode == DImode
11749 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11750 emit_insn ((mode == DImode
11752 : gen_ashrdi3) (high[0], high[0], operands[2]));
11754 if (TARGET_CMOVE && scratch)
11756 emit_move_insn (scratch, high[0]);
11757 emit_insn ((mode == DImode
11759 : gen_ashrdi3) (scratch, scratch,
11760 GEN_INT (single_width - 1)));
11761 emit_insn ((mode == DImode
11762 ? gen_x86_shift_adj_1
11763 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11767 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
11772 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
11774 rtx low[2], high[2];
11776 const int single_width = mode == DImode ? 32 : 64;
11778 if (GET_CODE (operands[2]) == CONST_INT)
11780 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11781 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11783 if (count >= single_width)
11785 emit_move_insn (low[0], high[1]);
11786 ix86_expand_clear (high[0]);
11788 if (count > single_width)
11789 emit_insn ((mode == DImode
11791 : gen_lshrdi3) (low[0], low[0],
11792 GEN_INT (count - single_width)));
11796 if (!rtx_equal_p (operands[0], operands[1]))
11797 emit_move_insn (operands[0], operands[1]);
11798 emit_insn ((mode == DImode
11800 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11801 emit_insn ((mode == DImode
11803 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
11808 if (!rtx_equal_p (operands[0], operands[1]))
11809 emit_move_insn (operands[0], operands[1]);
11811 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11813 emit_insn ((mode == DImode
11815 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11816 emit_insn ((mode == DImode
11818 : gen_lshrdi3) (high[0], high[0], operands[2]));
11820 /* Heh. By reversing the arguments, we can reuse this pattern. */
11821 if (TARGET_CMOVE && scratch)
11823 ix86_expand_clear (scratch);
11824 emit_insn ((mode == DImode
11825 ? gen_x86_shift_adj_1
11826 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11830 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
/* Helper function for the string operations below.  Test whether VARIABLE
   is aligned to VALUE bytes; if so, emit a jump to the returned label.  */
11837 ix86_expand_aligntest (rtx variable, int value)
11839 rtx label = gen_label_rtx ();
11840 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
11841 if (GET_MODE (variable) == DImode)
11842 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
11844 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
11845 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
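/* Usage sketch: the emitted test is "if ((VARIABLE & VALUE) == 0)
   goto label", so a caller typically writes
     label = ix86_expand_aligntest (destreg, 2);
     ... emit code handling the misaligned 2-byte chunk ...
     emit_label (label);  */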
/* Adjust COUNTREG by VALUE (emits an add of -VALUE).  */
11852 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
11854 if (GET_MODE (countreg) == DImode)
11855 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
11857 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
/* Zero-extend the possibly-SImode EXP to a Pmode register.  */
11862 ix86_zero_extend_to_Pmode (rtx exp)
11865 if (GET_MODE (exp) == VOIDmode)
11866 return force_reg (Pmode, exp);
11867 if (GET_MODE (exp) == Pmode)
11868 return copy_to_mode_reg (Pmode, exp);
11869 r = gen_reg_rtx (Pmode);
11870 emit_insn (gen_zero_extendsidi2 (r, exp));
11874 /* Expand string move (memcpy) operation. Use i386 string operations when
11875 profitable. expand_clrmem contains similar code. */
11877 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
11879 rtx srcreg, destreg, countreg, srcexp, destexp;
11880 enum machine_mode counter_mode;
11881 HOST_WIDE_INT align = 0;
11882 unsigned HOST_WIDE_INT count = 0;
11884 if (GET_CODE (align_exp) == CONST_INT)
11885 align = INTVAL (align_exp);
11887 /* Can't use any of this if the user has appropriated esi or edi. */
11888 if (global_regs[4] || global_regs[5])
11891 /* This simple hack avoids all inlining code and simplifies code below. */
11892 if (!TARGET_ALIGN_STRINGOPS)
11895 if (GET_CODE (count_exp) == CONST_INT)
11897 count = INTVAL (count_exp);
11898 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
/* Figure out the proper mode for the counter.  For 32 bits it is always
   SImode; for 64 bits use SImode when possible, otherwise DImode.
   Set count to the number of bytes copied when known at compile time.  */
11906 || GET_MODE (count_exp) == SImode
11907 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
11908 counter_mode = SImode;
11910 counter_mode = DImode;
11912 gcc_assert (counter_mode == SImode || counter_mode == DImode);
11914 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
11915 if (destreg != XEXP (dst, 0))
11916 dst = replace_equiv_address_nv (dst, destreg);
11917 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
11918 if (srcreg != XEXP (src, 0))
11919 src = replace_equiv_address_nv (src, srcreg);
/* When optimizing for size, emit the simple rep ; movsb instruction for
   counts not divisible by 4, except when the (movsl;)*(movsw;)?(movsb;)?
   sequence is shorter than mov{b,l} $count, %{cl,ecx}; rep; movsb.
   The size of the (movsl;)*(movsw;)?(movsb;)? sequence is
   count / 4 + (count & 3); the other sequence is either 4 or 7 bytes,
   but we don't know whether the upper 24 (resp. 56) bits of %ecx will be
   known to be zero or not.  The rep; movsb sequence causes higher
   register pressure though, so take that into account.  */
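/* Worked example of the size formula: for count == 11 the unrolled
   form costs 11 / 4 + (11 & 3) = 5 bytes (two 1-byte movsl, one
   2-byte movsw, one 1-byte movsb), beating the 7-byte
   movl $11, %ecx; rep; movsb sequence.  */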
11930 if ((!optimize || optimize_size)
11935 || (count & 3) + count / 4 > 6))))
11937 emit_insn (gen_cld ());
11938 countreg = ix86_zero_extend_to_Pmode (count_exp);
11939 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
11940 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
11941 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
11945 /* For constant aligned (or small unaligned) copies use rep movsl
11946 followed by code copying the rest. For PentiumPro ensure 8 byte
11947 alignment to allow rep movsl acceleration. */
11949 else if (count != 0
11951 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
11952 || optimize_size || count < (unsigned int) 64))
11954 unsigned HOST_WIDE_INT offset = 0;
11955 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11956 rtx srcmem, dstmem;
11958 emit_insn (gen_cld ());
11959 if (count & ~(size - 1))
11961 if ((TARGET_SINGLE_STRINGOP || optimize_size) && count < 5 * 4)
11963 enum machine_mode movs_mode = size == 4 ? SImode : DImode;
11965 while (offset < (count & ~(size - 1)))
11967 srcmem = adjust_automodify_address_nv (src, movs_mode,
11969 dstmem = adjust_automodify_address_nv (dst, movs_mode,
11971 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11977 countreg = GEN_INT ((count >> (size == 4 ? 2 : 3))
11978 & (TARGET_64BIT ? -1 : 0x3fffffff));
11979 countreg = copy_to_mode_reg (counter_mode, countreg);
11980 countreg = ix86_zero_extend_to_Pmode (countreg);
11982 destexp = gen_rtx_ASHIFT (Pmode, countreg,
11983 GEN_INT (size == 4 ? 2 : 3));
11984 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
11985 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11987 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
11988 countreg, destexp, srcexp));
11989 offset = count & ~(size - 1);
11992 if (size == 8 && (count & 0x04))
11994 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
11996 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
11998 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12003 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
12005 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
12007 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12012 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
12014 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
12016 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12019 /* The generic code based on the glibc implementation:
12020 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
12021 allowing accelerated copying there)
12022 - copy the data using rep movsl
12023 - copy the rest. */
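/* Roughly, in C terms (sketch only, assuming a 4-byte word size):
     while (n && ((uintptr_t) dst & 3)) { *dst++ = *src++; n--; }
     rep movsl for n / 4 words;
     then copy the remaining n & 3 bytes with movsw/movsb.  */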
12028 rtx srcmem, dstmem;
12029 int desired_alignment = (TARGET_PENTIUMPRO
12030 && (count == 0 || count >= (unsigned int) 260)
12031 ? 8 : UNITS_PER_WORD);
12032 /* Get rid of MEM_OFFSETs, they won't be accurate. */
12033 dst = change_address (dst, BLKmode, destreg);
12034 src = change_address (src, BLKmode, srcreg);
/* In case we don't know anything about the alignment, default to the
   library version, since it is usually equally fast and results in
   shorter code.

   Also emit the call when we know that the count is large and call
   overhead will not be important.  */
12042 if (!TARGET_INLINE_ALL_STRINGOPS
12043 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
12046 if (TARGET_SINGLE_STRINGOP)
12047 emit_insn (gen_cld ());
12049 countreg2 = gen_reg_rtx (Pmode);
12050 countreg = copy_to_mode_reg (counter_mode, count_exp);
12052 /* We don't use loops to align destination and to copy parts smaller
12053 than 4 bytes, because gcc is able to optimize such code better (in
12054 the case the destination or the count really is aligned, gcc is often
12055 able to predict the branches) and also it is friendlier to the
12056 hardware branch prediction.
Using loops is beneficial for the generic case, because we can
   handle small counts using the loops.  Many CPUs (such as Athlon)
   have large REP prefix setup costs.
12062 This is quite costly. Maybe we can revisit this decision later or
12063 add some customizability to this code. */
12065 if (count == 0 && align < desired_alignment)
12067 label = gen_label_rtx ();
12068 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
12069 LEU, 0, counter_mode, 1, label);
12073 rtx label = ix86_expand_aligntest (destreg, 1);
12074 srcmem = change_address (src, QImode, srcreg);
12075 dstmem = change_address (dst, QImode, destreg);
12076 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12077 ix86_adjust_counter (countreg, 1);
12078 emit_label (label);
12079 LABEL_NUSES (label) = 1;
12083 rtx label = ix86_expand_aligntest (destreg, 2);
12084 srcmem = change_address (src, HImode, srcreg);
12085 dstmem = change_address (dst, HImode, destreg);
12086 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12087 ix86_adjust_counter (countreg, 2);
12088 emit_label (label);
12089 LABEL_NUSES (label) = 1;
12091 if (align <= 4 && desired_alignment > 4)
12093 rtx label = ix86_expand_aligntest (destreg, 4);
12094 srcmem = change_address (src, SImode, srcreg);
12095 dstmem = change_address (dst, SImode, destreg);
12096 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12097 ix86_adjust_counter (countreg, 4);
12098 emit_label (label);
12099 LABEL_NUSES (label) = 1;
12102 if (label && desired_alignment > 4 && !TARGET_64BIT)
12104 emit_label (label);
12105 LABEL_NUSES (label) = 1;
12108 if (!TARGET_SINGLE_STRINGOP)
12109 emit_insn (gen_cld ());
12112 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
12114 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
12118 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
12119 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
12121 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
12122 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12123 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
12124 countreg2, destexp, srcexp));
12128 emit_label (label);
12129 LABEL_NUSES (label) = 1;
12131 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
12133 srcmem = change_address (src, SImode, srcreg);
12134 dstmem = change_address (dst, SImode, destreg);
12135 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12137 if ((align <= 4 || count == 0) && TARGET_64BIT)
12139 rtx label = ix86_expand_aligntest (countreg, 4);
12140 srcmem = change_address (src, SImode, srcreg);
12141 dstmem = change_address (dst, SImode, destreg);
12142 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12143 emit_label (label);
12144 LABEL_NUSES (label) = 1;
12146 if (align > 2 && count != 0 && (count & 2))
12148 srcmem = change_address (src, HImode, srcreg);
12149 dstmem = change_address (dst, HImode, destreg);
12150 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12152 if (align <= 2 || count == 0)
12154 rtx label = ix86_expand_aligntest (countreg, 2);
12155 srcmem = change_address (src, HImode, srcreg);
12156 dstmem = change_address (dst, HImode, destreg);
12157 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12158 emit_label (label);
12159 LABEL_NUSES (label) = 1;
12161 if (align > 1 && count != 0 && (count & 1))
12163 srcmem = change_address (src, QImode, srcreg);
12164 dstmem = change_address (dst, QImode, destreg);
12165 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12167 if (align <= 1 || count == 0)
12169 rtx label = ix86_expand_aligntest (countreg, 1);
12170 srcmem = change_address (src, QImode, srcreg);
12171 dstmem = change_address (dst, QImode, destreg);
12172 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12173 emit_label (label);
12174 LABEL_NUSES (label) = 1;
12181 /* Expand string clear operation (bzero). Use i386 string operations when
12182 profitable. expand_movmem contains similar code. */
12184 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
12186 rtx destreg, zeroreg, countreg, destexp;
12187 enum machine_mode counter_mode;
12188 HOST_WIDE_INT align = 0;
12189 unsigned HOST_WIDE_INT count = 0;
12191 if (GET_CODE (align_exp) == CONST_INT)
12192 align = INTVAL (align_exp);
/* Can't use any of this if the user has appropriated edi (rep stos
   stores through edi).  */
if (global_regs[5])
12198 /* This simple hack avoids all inlining code and simplifies code below. */
12199 if (!TARGET_ALIGN_STRINGOPS)
12202 if (GET_CODE (count_exp) == CONST_INT)
12204 count = INTVAL (count_exp);
12205 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
/* Figure out the proper mode for the counter.  For 32 bits it is always
   SImode; for 64 bits use SImode when possible, otherwise DImode.
   Set count to the number of bytes cleared when known at compile time.  */
12212 || GET_MODE (count_exp) == SImode
12213 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
12214 counter_mode = SImode;
12216 counter_mode = DImode;
12218 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
12219 if (destreg != XEXP (dst, 0))
12220 dst = replace_equiv_address_nv (dst, destreg);
/* When optimizing for size, emit the simple rep ; stosb instruction for
   counts not divisible by 4.  The movl $N, %ecx; rep; stosb
   sequence is 7 bytes long, so if optimizing for size and the count is
   small enough that some stosl, stosw and stosb instructions without
   rep are shorter, fall back into the next if.  */
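/* For example, count == 6 yields stosl; stosw (1 + 2 = 3 bytes),
   which is shorter than the 7-byte movl $6, %ecx; rep; stosb
   sequence.  */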
12229 if ((!optimize || optimize_size)
12232 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
12234 emit_insn (gen_cld ());
12236 countreg = ix86_zero_extend_to_Pmode (count_exp);
12237 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
12238 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
12239 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
12241 else if (count != 0
12243 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
12244 || optimize_size || count < (unsigned int) 64))
12246 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
12247 unsigned HOST_WIDE_INT offset = 0;
12249 emit_insn (gen_cld ());
12251 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
12252 if (count & ~(size - 1))
12254 unsigned HOST_WIDE_INT repcount;
12255 unsigned int max_nonrep;
12257 repcount = count >> (size == 4 ? 2 : 3);
12259 repcount &= 0x3fffffff;
/* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
   movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
   bytes.  In both cases the latter seems to be faster for small
   values of N.  */
12265 max_nonrep = size == 4 ? 7 : 4;
12266 if (!optimize_size)
12269 case PROCESSOR_PENTIUM4:
12270 case PROCESSOR_NOCONA:
12277 if (repcount <= max_nonrep)
12278 while (repcount-- > 0)
12280 rtx mem = adjust_automodify_address_nv (dst,
12281 GET_MODE (zeroreg),
12283 emit_insn (gen_strset (destreg, mem, zeroreg));
12288 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
12289 countreg = ix86_zero_extend_to_Pmode (countreg);
12290 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12291 GEN_INT (size == 4 ? 2 : 3));
12292 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12293 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
12295 offset = count & ~(size - 1);
12298 if (size == 8 && (count & 0x04))
12300 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
12302 emit_insn (gen_strset (destreg, mem,
12303 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12308 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
12310 emit_insn (gen_strset (destreg, mem,
12311 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12316 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
12318 emit_insn (gen_strset (destreg, mem,
12319 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12326 /* Compute desired alignment of the string operation. */
12327 int desired_alignment = (TARGET_PENTIUMPRO
12328 && (count == 0 || count >= (unsigned int) 260)
12329 ? 8 : UNITS_PER_WORD);
/* In case we don't know anything about the alignment, default to the
   library version, since it is usually equally fast and results in
   shorter code.

   Also emit the call when we know that the count is large and call
   overhead will not be important.  */
12337 if (!TARGET_INLINE_ALL_STRINGOPS
12338 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
12341 if (TARGET_SINGLE_STRINGOP)
12342 emit_insn (gen_cld ());
12344 countreg2 = gen_reg_rtx (Pmode);
12345 countreg = copy_to_mode_reg (counter_mode, count_exp);
12346 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
12347 /* Get rid of MEM_OFFSET, it won't be accurate. */
12348 dst = change_address (dst, BLKmode, destreg);
12350 if (count == 0 && align < desired_alignment)
12352 label = gen_label_rtx ();
12353 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
12354 LEU, 0, counter_mode, 1, label);
12358 rtx label = ix86_expand_aligntest (destreg, 1);
12359 emit_insn (gen_strset (destreg, dst,
12360 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12361 ix86_adjust_counter (countreg, 1);
12362 emit_label (label);
12363 LABEL_NUSES (label) = 1;
12367 rtx label = ix86_expand_aligntest (destreg, 2);
12368 emit_insn (gen_strset (destreg, dst,
12369 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12370 ix86_adjust_counter (countreg, 2);
12371 emit_label (label);
12372 LABEL_NUSES (label) = 1;
12374 if (align <= 4 && desired_alignment > 4)
12376 rtx label = ix86_expand_aligntest (destreg, 4);
12377 emit_insn (gen_strset (destreg, dst,
12379 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
12381 ix86_adjust_counter (countreg, 4);
12382 emit_label (label);
12383 LABEL_NUSES (label) = 1;
12386 if (label && desired_alignment > 4 && !TARGET_64BIT)
12388 emit_label (label);
12389 LABEL_NUSES (label) = 1;
12393 if (!TARGET_SINGLE_STRINGOP)
12394 emit_insn (gen_cld ());
12397 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
12399 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
12403 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
12404 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
12406 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12407 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
12411 emit_label (label);
12412 LABEL_NUSES (label) = 1;
12415 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
12416 emit_insn (gen_strset (destreg, dst,
12417 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12418 if (TARGET_64BIT && (align <= 4 || count == 0))
12420 rtx label = ix86_expand_aligntest (countreg, 4);
12421 emit_insn (gen_strset (destreg, dst,
12422 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12423 emit_label (label);
12424 LABEL_NUSES (label) = 1;
12426 if (align > 2 && count != 0 && (count & 2))
12427 emit_insn (gen_strset (destreg, dst,
12428 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12429 if (align <= 2 || count == 0)
12431 rtx label = ix86_expand_aligntest (countreg, 2);
12432 emit_insn (gen_strset (destreg, dst,
12433 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12434 emit_label (label);
12435 LABEL_NUSES (label) = 1;
12437 if (align > 1 && count != 0 && (count & 1))
12438 emit_insn (gen_strset (destreg, dst,
12439 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12440 if (align <= 1 || count == 0)
12442 rtx label = ix86_expand_aligntest (countreg, 1);
12443 emit_insn (gen_strset (destreg, dst,
12444 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12445 emit_label (label);
12446 LABEL_NUSES (label) = 1;
12452 /* Expand strlen. */
12454 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
12456 rtx addr, scratch1, scratch2, scratch3, scratch4;
/* The generic case of the strlen expander is long.  Avoid expanding it
   unless TARGET_INLINE_ALL_STRINGOPS.  */
12461 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
12462 && !TARGET_INLINE_ALL_STRINGOPS
12464 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
12467 addr = force_reg (Pmode, XEXP (src, 0));
12468 scratch1 = gen_reg_rtx (Pmode);
12470 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
/* Well, it seems that some optimizers do not combine a call like
   foo (strlen (bar), strlen (bar));
   when the move and the subtraction are done here.  The length is
   calculated just once when these instructions are done inside
   output_strlen_unroll().  But since &bar[strlen (bar)] is often used,
   and one fewer register is live for the lifetime of
   output_strlen_unroll(), this is better.  */
12481 emit_move_insn (out, addr);
12483 ix86_expand_strlensi_unroll_1 (out, src, align);
12485 /* strlensi_unroll_1 returns the address of the zero at the end of
12486 the string, like memchr(), so compute the length by subtracting
12487 the start address. */
12489 emit_insn (gen_subdi3 (out, out, addr));
12491 emit_insn (gen_subsi3 (out, out, addr));
12496 scratch2 = gen_reg_rtx (Pmode);
12497 scratch3 = gen_reg_rtx (Pmode);
12498 scratch4 = force_reg (Pmode, constm1_rtx);
12500 emit_move_insn (scratch3, addr);
12501 eoschar = force_reg (QImode, eoschar);
12503 emit_insn (gen_cld ());
12504 src = replace_equiv_address_nv (src, scratch3);
12506 /* If .md starts supporting :P, this can be done in .md. */
12507 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
12508 scratch4), UNSPEC_SCAS);
12509 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
12512 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
12513 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
12517 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
12518 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
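/* Sketch of the arithmetic above: with the count register preloaded
   to -1, repnz; scasb leaves it at -(len + 2) after scanning len
   bytes plus the terminator, so out = ~count - 1 = len, which is what
   the one's-complement and the add of constm1_rtx compute.  */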
/* Expand the appropriate insns for doing strlen if not just doing
   repnz; scasb

   out = result, initialized with the start address
   align_rtx = alignment of the address.
   scratch = scratch register, initialized with the start address when
	not aligned, otherwise undefined
12532 This is just the body. It needs the initializations mentioned above and
12533 some address computing at the end. These things are done in i386.md. */
12536 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
12540 rtx align_2_label = NULL_RTX;
12541 rtx align_3_label = NULL_RTX;
12542 rtx align_4_label = gen_label_rtx ();
12543 rtx end_0_label = gen_label_rtx ();
12545 rtx tmpreg = gen_reg_rtx (SImode);
12546 rtx scratch = gen_reg_rtx (SImode);
12550 if (GET_CODE (align_rtx) == CONST_INT)
12551 align = INTVAL (align_rtx);
12553 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
12555 /* Is there a known alignment and is it less than 4? */
12558 rtx scratch1 = gen_reg_rtx (Pmode);
12559 emit_move_insn (scratch1, out);
12560 /* Is there a known alignment and is it not 2? */
12563 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
12564 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
12566 /* Leave just the 3 lower bits. */
12567 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
12568 NULL_RTX, 0, OPTAB_WIDEN);
12570 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12571 Pmode, 1, align_4_label);
12572 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
12573 Pmode, 1, align_2_label);
12574 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
12575 Pmode, 1, align_3_label);
/* Since the alignment is 2, we have to check 2 or 0 bytes;
   check whether we are aligned to a 4-byte boundary.  */
12582 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
12583 NULL_RTX, 0, OPTAB_WIDEN);
12585 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12586 Pmode, 1, align_4_label);
12589 mem = change_address (src, QImode, out);
12591 /* Now compare the bytes. */
/* Compare the first n unaligned bytes on a byte-by-byte basis.  */
12594 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
12595 QImode, 1, end_0_label);
12597 /* Increment the address. */
12599 emit_insn (gen_adddi3 (out, out, const1_rtx));
12601 emit_insn (gen_addsi3 (out, out, const1_rtx));
/* Not needed with an alignment of 2.  */
12606 emit_label (align_2_label);
12608 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12612 emit_insn (gen_adddi3 (out, out, const1_rtx));
12614 emit_insn (gen_addsi3 (out, out, const1_rtx));
12616 emit_label (align_3_label);
12619 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12623 emit_insn (gen_adddi3 (out, out, const1_rtx));
12625 emit_insn (gen_addsi3 (out, out, const1_rtx));
/* Generate a loop to check 4 bytes at a time.  It is not a good idea
   to align this loop; it only enlarges the program and does not help
   performance.  */
12631 emit_label (align_4_label);
12633 mem = change_address (src, SImode, out);
12634 emit_move_insn (scratch, mem);
12636 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
12638 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
/* This formula yields a nonzero result iff one of the bytes is zero.
   This saves three branches inside the loop and many cycles.  */
12643 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
12644 emit_insn (gen_one_cmplsi2 (scratch, scratch));
12645 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
12646 emit_insn (gen_andsi3 (tmpreg, tmpreg,
12647 gen_int_mode (0x80808080, SImode)));
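/* This is the classic "has zero byte" idiom; in C, for a 32-bit x,
     (x - 0x01010101) & ~x & 0x80808080
   is nonzero iff some byte of x is zero: the subtraction borrows into
   a byte's sign bit only when that byte wraps below zero, and masking
   with ~x rejects bytes whose sign bit was already set.  */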
12648 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
12653 rtx reg = gen_reg_rtx (SImode);
12654 rtx reg2 = gen_reg_rtx (Pmode);
12655 emit_move_insn (reg, tmpreg);
12656 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
12658 /* If zero is not in the first two bytes, move two bytes forward. */
12659 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12660 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12661 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12662 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
12663 gen_rtx_IF_THEN_ELSE (SImode, tmp,
/* Emit the lea manually to avoid clobbering the flags.  */
emit_insn (gen_rtx_SET (VOIDmode, reg2,
12668 gen_rtx_PLUS (Pmode, out, const2_rtx)));
12670 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12671 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12672 emit_insn (gen_rtx_SET (VOIDmode, out,
12673 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
12680 rtx end_2_label = gen_label_rtx ();
12681 /* Is zero in the first two bytes? */
12683 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12684 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12685 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
12686 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12687 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
12689 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
12690 JUMP_LABEL (tmp) = end_2_label;
12692 /* Not in the first two. Move two bytes forward. */
12693 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
12695 emit_insn (gen_adddi3 (out, out, const2_rtx));
12697 emit_insn (gen_addsi3 (out, out, const2_rtx));
12699 emit_label (end_2_label);
/* Avoid a branch in fixing the byte.  */
12704 tmpreg = gen_lowpart (QImode, tmpreg);
12705 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
12708 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
12710 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
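/* Branch-free fixup sketch: doubling the tag byte sets the carry flag
   iff its top bit was set, i.e. iff the earlier of the two remaining
   bytes was the zero byte; the subtract-with-borrow then computes
   out -= 3 + CF, adjusting OUT by 3 or 4 in straight-line code.  */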
12712 emit_label (end_0_label);
12716 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
12717 rtx callarg2 ATTRIBUTE_UNUSED,
12718 rtx pop, int sibcall)
12720 rtx use = NULL, call;
12722 if (pop == const0_rtx)
12724 gcc_assert (!TARGET_64BIT || !pop);
12727 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
12728 fnaddr = machopic_indirect_call_target (fnaddr);
12730 /* Static functions and indirect calls don't need the pic register. */
12731 if (! TARGET_64BIT && flag_pic
12732 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
12733 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
12734 use_reg (&use, pic_offset_table_rtx);
12736 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
12738 rtx al = gen_rtx_REG (QImode, 0);
12739 emit_move_insn (al, callarg2);
12740 use_reg (&use, al);
12742 #endif /* TARGET_MACHO */
12744 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
12746 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12747 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12749 if (sibcall && TARGET_64BIT
12750 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
12753 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12754 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
12755 emit_move_insn (fnaddr, addr);
12756 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12759 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
12761 call = gen_rtx_SET (VOIDmode, retval, call);
12764 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
12765 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
12766 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
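/* The PARALLEL built above models a callee-popped call: the extra SET
   bumps the stack pointer by POP bytes, matching a "ret $N" in the
   callee (e.g. stdcall conventions on 32-bit targets).  */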
12769 call = emit_call_insn (call);
12771 CALL_INSN_FUNCTION_USAGE (call) = use;
/* Clear stack slot assignments remembered from previous functions.
   This is called from INIT_EXPANDERS once before RTL is emitted for each
   function.  */
12779 static struct machine_function *
12780 ix86_init_machine_status (void)
12782 struct machine_function *f;
12784 f = ggc_alloc_cleared (sizeof (struct machine_function));
12785 f->use_fast_prologue_epilogue_nregs = -1;
12790 /* Return a MEM corresponding to a stack slot with mode MODE.
12791 Allocate a new slot if necessary.
12793 The RTL for a function can have several slots available: N is
12794 which slot to use. */
12797 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
12799 struct stack_local_entry *s;
12801 gcc_assert (n < MAX_386_STACK_LOCALS);
12803 for (s = ix86_stack_locals; s; s = s->next)
12804 if (s->mode == mode && s->n == n)
12807 s = (struct stack_local_entry *)
12808 ggc_alloc (sizeof (struct stack_local_entry));
12811 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
12813 s->next = ix86_stack_locals;
12814 ix86_stack_locals = s;
12818 /* Construct the SYMBOL_REF for the tls_get_addr function. */
12820 static GTY(()) rtx ix86_tls_symbol;
12822 ix86_tls_get_addr (void)
12825 if (!ix86_tls_symbol)
12827 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
12828 (TARGET_GNU_TLS && !TARGET_64BIT)
12829 ? "___tls_get_addr"
12830 : "__tls_get_addr");
12833 return ix86_tls_symbol;
12836 /* Calculate the length of the memory address in the instruction
12837 encoding. Does not include the one-byte modrm, opcode, or prefix. */
12840 memory_address_length (rtx addr)
12842 struct ix86_address parts;
12843 rtx base, index, disp;
12847 if (GET_CODE (addr) == PRE_DEC
12848 || GET_CODE (addr) == POST_INC
12849 || GET_CODE (addr) == PRE_MODIFY
12850 || GET_CODE (addr) == POST_MODIFY)
12853 ok = ix86_decompose_address (addr, &parts);
12856 if (parts.base && GET_CODE (parts.base) == SUBREG)
12857 parts.base = SUBREG_REG (parts.base);
12858 if (parts.index && GET_CODE (parts.index) == SUBREG)
12859 parts.index = SUBREG_REG (parts.index);
12862 index = parts.index;
/* Rules of thumb:
     - esp as the base always wants an index,
     - ebp as the base always wants a displacement.  */
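/* For example (illustrative): an (%eax) operand needs no extra
   address bytes, (%esp) needs a SIB byte, and (%ebp) needs a disp8
   byte; the cases below account for exactly these encodings.  */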
12870 /* Register Indirect. */
12871 if (base && !index && !disp)
12873 /* esp (for its index) and ebp (for its displacement) need
12874 the two-byte modrm form. */
12875 if (addr == stack_pointer_rtx
12876 || addr == arg_pointer_rtx
12877 || addr == frame_pointer_rtx
12878 || addr == hard_frame_pointer_rtx)
12882 /* Direct Addressing. */
12883 else if (disp && !base && !index)
12888 /* Find the length of the displacement constant. */
12891 if (GET_CODE (disp) == CONST_INT
12892 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
12898 /* ebp always wants a displacement. */
12899 else if (base == hard_frame_pointer_rtx)
12902 /* An index requires the two-byte modrm form.... */
12904 /* ...like esp, which always wants an index. */
12905 || base == stack_pointer_rtx
12906 || base == arg_pointer_rtx
12907 || base == frame_pointer_rtx)
/* Compute the default value for the "length_immediate" attribute.  When
   SHORTFORM is set, expect that the insn has an 8-bit immediate
   alternative.  */
12917 ix86_attr_length_immediate_default (rtx insn, int shortform)
12921 extract_insn_cached (insn);
12922 for (i = recog_data.n_operands - 1; i >= 0; --i)
12923 if (CONSTANT_P (recog_data.operand[i]))
12927 && GET_CODE (recog_data.operand[i]) == CONST_INT
12928 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
12932 switch (get_attr_mode (insn))
/* Immediates for DImode instructions are encoded as 32-bit
   sign-extended values.  */
12948 fatal_insn ("unknown insn mode", insn);
/* Compute the default value for the "length_address" attribute.  */
12956 ix86_attr_length_address_default (rtx insn)
12960 if (get_attr_type (insn) == TYPE_LEA)
12962 rtx set = PATTERN (insn);
12964 if (GET_CODE (set) == PARALLEL)
12965 set = XVECEXP (set, 0, 0);
12967 gcc_assert (GET_CODE (set) == SET);
12969 return memory_address_length (SET_SRC (set));
12972 extract_insn_cached (insn);
12973 for (i = recog_data.n_operands - 1; i >= 0; --i)
12974 if (GET_CODE (recog_data.operand[i]) == MEM)
12976 return memory_address_length (XEXP (recog_data.operand[i], 0));
12982 /* Return the maximum number of instructions a cpu can issue. */
12985 ix86_issue_rate (void)
12989 case PROCESSOR_PENTIUM:
12993 case PROCESSOR_PENTIUMPRO:
12994 case PROCESSOR_PENTIUM4:
12995 case PROCESSOR_ATHLON:
12997 case PROCESSOR_NOCONA:
13005 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
13006 by DEP_INSN and nothing set by DEP_INSN. */
13009 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
13013 /* Simplify the test for uninteresting insns. */
13014 if (insn_type != TYPE_SETCC
13015 && insn_type != TYPE_ICMOV
13016 && insn_type != TYPE_FCMOV
13017 && insn_type != TYPE_IBR)
13020 if ((set = single_set (dep_insn)) != 0)
13022 set = SET_DEST (set);
13025 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
13026 && XVECLEN (PATTERN (dep_insn), 0) == 2
13027 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
13028 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
13030 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
13036 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
13039 /* This test is true if the dependent insn reads the flags but
13040 not any other potentially set register. */
13041 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
13044 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
13050 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
13051 address with operands set by DEP_INSN. */
13054 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
13058 if (insn_type == TYPE_LEA
13061 addr = PATTERN (insn);
13063 if (GET_CODE (addr) == PARALLEL)
13064 addr = XVECEXP (addr, 0, 0);
13066 gcc_assert (GET_CODE (addr) == SET);
13068 addr = SET_SRC (addr);
13073 extract_insn_cached (insn);
13074 for (i = recog_data.n_operands - 1; i >= 0; --i)
13075 if (GET_CODE (recog_data.operand[i]) == MEM)
13077 addr = XEXP (recog_data.operand[i], 0);
13084 return modified_in_p (addr, dep_insn);
13088 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
13090 enum attr_type insn_type, dep_insn_type;
13091 enum attr_memory memory;
13093 int dep_insn_code_number;
13095 /* Anti and output dependencies have zero cost on all CPUs. */
13096 if (REG_NOTE_KIND (link) != 0)
13099 dep_insn_code_number = recog_memoized (dep_insn);
13101 /* If we can't recognize the insns, we can't really do anything. */
13102 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
13105 insn_type = get_attr_type (insn);
13106 dep_insn_type = get_attr_type (dep_insn);
13110 case PROCESSOR_PENTIUM:
13111 /* Address Generation Interlock adds a cycle of latency. */
13112 if (ix86_agi_dependant (insn, dep_insn, insn_type))
13115 /* ??? Compares pair with jump/setcc. */
13116 if (ix86_flags_dependant (insn, dep_insn, insn_type))
13119 /* Floating point stores require value to be ready one cycle earlier. */
13120 if (insn_type == TYPE_FMOV
13121 && get_attr_memory (insn) == MEMORY_STORE
13122 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13126 case PROCESSOR_PENTIUMPRO:
13127 memory = get_attr_memory (insn);
13129 /* INT->FP conversion is expensive. */
13130 if (get_attr_fp_int_src (dep_insn))
13133 /* There is one cycle extra latency between an FP op and a store. */
13134 if (insn_type == TYPE_FMOV
13135 && (set = single_set (dep_insn)) != NULL_RTX
13136 && (set2 = single_set (insn)) != NULL_RTX
13137 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
13138 && GET_CODE (SET_DEST (set2)) == MEM)
13141 /* Show ability of reorder buffer to hide latency of load by executing
13142 in parallel with previous instruction in case
13143 previous instruction is not needed to compute the address. */
13144 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13145 && !ix86_agi_dependant (insn, dep_insn, insn_type))
/* Claim moves to take one cycle, as the core can issue one load
   at a time and the next load can start a cycle later.  */
13149 if (dep_insn_type == TYPE_IMOV
13150 || dep_insn_type == TYPE_FMOV)
13158 memory = get_attr_memory (insn);
/* The esp dependency is resolved before the instruction is really
   finished.  */
13162 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
13163 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
13166 /* INT->FP conversion is expensive. */
13167 if (get_attr_fp_int_src (dep_insn))
13170 /* Show ability of reorder buffer to hide latency of load by executing
13171 in parallel with previous instruction in case
13172 previous instruction is not needed to compute the address. */
13173 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13174 && !ix86_agi_dependant (insn, dep_insn, insn_type))
/* Claim moves to take one cycle, as the core can issue one load
   at a time and the next load can start a cycle later.  */
13178 if (dep_insn_type == TYPE_IMOV
13179 || dep_insn_type == TYPE_FMOV)
13188 case PROCESSOR_ATHLON:
13190 memory = get_attr_memory (insn);
13192 /* Show ability of reorder buffer to hide latency of load by executing
13193 in parallel with previous instruction in case
13194 previous instruction is not needed to compute the address. */
13195 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13196 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13198 enum attr_unit unit = get_attr_unit (insn);
/* Because of the difference between the length of integer and
   floating unit pipeline preparation stages, the memory operands
   for floating point are cheaper.

   ??? For Athlon the difference is most probably 2.  */
13206 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
13209 loadcost = TARGET_ATHLON ? 2 : 0;
13211 if (cost >= loadcost)
13224 /* How many alternative schedules to try. This should be as wide as the
13225 scheduling freedom in the DFA, but no wider. Making this value too
large results in extra work for the scheduler.  */
13229 ia32_multipass_dfa_lookahead (void)
13231 if (ix86_tune == PROCESSOR_PENTIUM)
13234 if (ix86_tune == PROCESSOR_PENTIUMPRO
13235 || ix86_tune == PROCESSOR_K6)
13243 /* Compute the alignment given to a constant that is being placed in memory.
EXP is the constant and ALIGN is the alignment that the object would
   ordinarily have.  The value of this function is used instead of that
   alignment to align the object.  */
13250 ix86_constant_alignment (tree exp, int align)
13252 if (TREE_CODE (exp) == REAL_CST)
13254 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
13256 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
13259 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
13260 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
13261 return BITS_PER_WORD;
13266 /* Compute the alignment for a static variable.
13267 TYPE is the data type, and ALIGN is the alignment that
13268 the object would ordinarily have. The value of this function is used
13269 instead of that alignment to align the object. */
13272 ix86_data_alignment (tree type, int align)
13274 if (AGGREGATE_TYPE_P (type)
13275 && TYPE_SIZE (type)
13276 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13277 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
13278 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
/* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
   to a 16-byte boundary.  */
13285 if (AGGREGATE_TYPE_P (type)
13286 && TYPE_SIZE (type)
13287 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13288 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
13289 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13293 if (TREE_CODE (type) == ARRAY_TYPE)
13295 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13297 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13300 else if (TREE_CODE (type) == COMPLEX_TYPE)
13303 if (TYPE_MODE (type) == DCmode && align < 64)
13305 if (TYPE_MODE (type) == XCmode && align < 128)
13308 else if ((TREE_CODE (type) == RECORD_TYPE
13309 || TREE_CODE (type) == UNION_TYPE
13310 || TREE_CODE (type) == QUAL_UNION_TYPE)
13311 && TYPE_FIELDS (type))
13313 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13315 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13318 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13319 || TREE_CODE (type) == INTEGER_TYPE)
13321 if (TYPE_MODE (type) == DFmode && align < 64)
13323 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13330 /* Compute the alignment for a local variable.
13331 TYPE is the data type, and ALIGN is the alignment that
13332 the object would ordinarily have. The value of this macro is used
13333 instead of that alignment to align the object. */
13336 ix86_local_alignment (tree type, int align)
/* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
   to a 16-byte boundary.  */
13342 if (AGGREGATE_TYPE_P (type)
13343 && TYPE_SIZE (type)
13344 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13345 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
13346 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13349 if (TREE_CODE (type) == ARRAY_TYPE)
13351 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13353 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13356 else if (TREE_CODE (type) == COMPLEX_TYPE)
13358 if (TYPE_MODE (type) == DCmode && align < 64)
13360 if (TYPE_MODE (type) == XCmode && align < 128)
13363 else if ((TREE_CODE (type) == RECORD_TYPE
13364 || TREE_CODE (type) == UNION_TYPE
13365 || TREE_CODE (type) == QUAL_UNION_TYPE)
13366 && TYPE_FIELDS (type))
13368 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13370 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13373 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13374 || TREE_CODE (type) == INTEGER_TYPE)
13377 if (TYPE_MODE (type) == DFmode && align < 64)
13379 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13385 /* Emit RTL insns to initialize the variable parts of a trampoline.
13386 FNADDR is an RTX for the address of the function's pure code.
13387 CXT is an RTX for the static chain value for the function. */
13389 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
13393 /* Compute offset from the end of the jmp to the target function. */
13394 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
13395 plus_constant (tramp, 10),
13396 NULL_RTX, 1, OPTAB_DIRECT);
13397 emit_move_insn (gen_rtx_MEM (QImode, tramp),
13398 gen_int_mode (0xb9, QImode));
13399 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
13400 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
13401 gen_int_mode (0xe9, QImode));
13402 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
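/* Byte layout of the 10-byte trampoline emitted above (sketch):
     b9 <cxt:4>    movl $CXT, %ecx
     e9 <disp:4>   jmp  FNADDR (pc-relative)
   which is why DISP is computed relative to tramp + 10.  */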
/* Try to load the address using the shorter movl instead of movabs.
   We may want to support movq for kernel mode, but the kernel does not
   use trampolines at the moment.  */
13410 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
13412 fnaddr = copy_to_mode_reg (DImode, fnaddr);
13413 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13414 gen_int_mode (0xbb41, HImode));
13415 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
13416 gen_lowpart (SImode, fnaddr));
13421 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13422 gen_int_mode (0xbb49, HImode));
13423 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
13427 /* Load static chain using movabs to r10. */
13428 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13429 gen_int_mode (0xba49, HImode));
13430 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
/* Jump to r11.  */
13434 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13435 gen_int_mode (0xff49, HImode));
13436 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
13437 gen_int_mode (0xe3, QImode));
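/* Byte sketch of the 64-bit trampoline emitted above (little endian):
     49 bb <imm64>   movabs $FNADDR, %r11   (41 bb <imm32> when the
                                             shorter movl suffices)
     49 ba <imm64>   movabs $CXT, %r10
     49 ff e3        jmp *%r11  */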
13439 gcc_assert (offset <= TRAMPOLINE_SIZE);
13442 #ifdef ENABLE_EXECUTE_STACK
13443 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
13444 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
13448 /* Codes for all the SSE/MMX builtins. */
13451 IX86_BUILTIN_ADDPS,
13452 IX86_BUILTIN_ADDSS,
13453 IX86_BUILTIN_DIVPS,
13454 IX86_BUILTIN_DIVSS,
13455 IX86_BUILTIN_MULPS,
13456 IX86_BUILTIN_MULSS,
13457 IX86_BUILTIN_SUBPS,
13458 IX86_BUILTIN_SUBSS,
13460 IX86_BUILTIN_CMPEQPS,
13461 IX86_BUILTIN_CMPLTPS,
13462 IX86_BUILTIN_CMPLEPS,
13463 IX86_BUILTIN_CMPGTPS,
13464 IX86_BUILTIN_CMPGEPS,
13465 IX86_BUILTIN_CMPNEQPS,
13466 IX86_BUILTIN_CMPNLTPS,
13467 IX86_BUILTIN_CMPNLEPS,
13468 IX86_BUILTIN_CMPNGTPS,
13469 IX86_BUILTIN_CMPNGEPS,
13470 IX86_BUILTIN_CMPORDPS,
13471 IX86_BUILTIN_CMPUNORDPS,
13472 IX86_BUILTIN_CMPNEPS,
13473 IX86_BUILTIN_CMPEQSS,
13474 IX86_BUILTIN_CMPLTSS,
13475 IX86_BUILTIN_CMPLESS,
13476 IX86_BUILTIN_CMPNEQSS,
13477 IX86_BUILTIN_CMPNLTSS,
13478 IX86_BUILTIN_CMPNLESS,
13479 IX86_BUILTIN_CMPNGTSS,
13480 IX86_BUILTIN_CMPNGESS,
13481 IX86_BUILTIN_CMPORDSS,
13482 IX86_BUILTIN_CMPUNORDSS,
13483 IX86_BUILTIN_CMPNESS,
13485 IX86_BUILTIN_COMIEQSS,
13486 IX86_BUILTIN_COMILTSS,
13487 IX86_BUILTIN_COMILESS,
13488 IX86_BUILTIN_COMIGTSS,
13489 IX86_BUILTIN_COMIGESS,
13490 IX86_BUILTIN_COMINEQSS,
13491 IX86_BUILTIN_UCOMIEQSS,
13492 IX86_BUILTIN_UCOMILTSS,
13493 IX86_BUILTIN_UCOMILESS,
13494 IX86_BUILTIN_UCOMIGTSS,
13495 IX86_BUILTIN_UCOMIGESS,
13496 IX86_BUILTIN_UCOMINEQSS,
13498 IX86_BUILTIN_CVTPI2PS,
13499 IX86_BUILTIN_CVTPS2PI,
13500 IX86_BUILTIN_CVTSI2SS,
13501 IX86_BUILTIN_CVTSI642SS,
13502 IX86_BUILTIN_CVTSS2SI,
13503 IX86_BUILTIN_CVTSS2SI64,
13504 IX86_BUILTIN_CVTTPS2PI,
13505 IX86_BUILTIN_CVTTSS2SI,
13506 IX86_BUILTIN_CVTTSS2SI64,
13508 IX86_BUILTIN_MAXPS,
13509 IX86_BUILTIN_MAXSS,
13510 IX86_BUILTIN_MINPS,
13511 IX86_BUILTIN_MINSS,
13513 IX86_BUILTIN_LOADUPS,
13514 IX86_BUILTIN_STOREUPS,
13515 IX86_BUILTIN_MOVSS,
13517 IX86_BUILTIN_MOVHLPS,
13518 IX86_BUILTIN_MOVLHPS,
13519 IX86_BUILTIN_LOADHPS,
13520 IX86_BUILTIN_LOADLPS,
13521 IX86_BUILTIN_STOREHPS,
13522 IX86_BUILTIN_STORELPS,
13524 IX86_BUILTIN_MASKMOVQ,
13525 IX86_BUILTIN_MOVMSKPS,
13526 IX86_BUILTIN_PMOVMSKB,
13528 IX86_BUILTIN_MOVNTPS,
13529 IX86_BUILTIN_MOVNTQ,
13531 IX86_BUILTIN_LOADDQU,
13532 IX86_BUILTIN_STOREDQU,
13534 IX86_BUILTIN_PACKSSWB,
13535 IX86_BUILTIN_PACKSSDW,
13536 IX86_BUILTIN_PACKUSWB,
13538 IX86_BUILTIN_PADDB,
13539 IX86_BUILTIN_PADDW,
13540 IX86_BUILTIN_PADDD,
13541 IX86_BUILTIN_PADDQ,
13542 IX86_BUILTIN_PADDSB,
13543 IX86_BUILTIN_PADDSW,
13544 IX86_BUILTIN_PADDUSB,
13545 IX86_BUILTIN_PADDUSW,
13546 IX86_BUILTIN_PSUBB,
13547 IX86_BUILTIN_PSUBW,
13548 IX86_BUILTIN_PSUBD,
13549 IX86_BUILTIN_PSUBQ,
13550 IX86_BUILTIN_PSUBSB,
13551 IX86_BUILTIN_PSUBSW,
13552 IX86_BUILTIN_PSUBUSB,
13553 IX86_BUILTIN_PSUBUSW,
13556 IX86_BUILTIN_PANDN,
13560 IX86_BUILTIN_PAVGB,
13561 IX86_BUILTIN_PAVGW,
13563 IX86_BUILTIN_PCMPEQB,
13564 IX86_BUILTIN_PCMPEQW,
13565 IX86_BUILTIN_PCMPEQD,
13566 IX86_BUILTIN_PCMPGTB,
13567 IX86_BUILTIN_PCMPGTW,
13568 IX86_BUILTIN_PCMPGTD,
13570 IX86_BUILTIN_PMADDWD,
13572 IX86_BUILTIN_PMAXSW,
13573 IX86_BUILTIN_PMAXUB,
13574 IX86_BUILTIN_PMINSW,
13575 IX86_BUILTIN_PMINUB,
13577 IX86_BUILTIN_PMULHUW,
13578 IX86_BUILTIN_PMULHW,
13579 IX86_BUILTIN_PMULLW,
13581 IX86_BUILTIN_PSADBW,
13582 IX86_BUILTIN_PSHUFW,
13584 IX86_BUILTIN_PSLLW,
13585 IX86_BUILTIN_PSLLD,
13586 IX86_BUILTIN_PSLLQ,
13587 IX86_BUILTIN_PSRAW,
13588 IX86_BUILTIN_PSRAD,
13589 IX86_BUILTIN_PSRLW,
13590 IX86_BUILTIN_PSRLD,
13591 IX86_BUILTIN_PSRLQ,
13592 IX86_BUILTIN_PSLLWI,
13593 IX86_BUILTIN_PSLLDI,
13594 IX86_BUILTIN_PSLLQI,
13595 IX86_BUILTIN_PSRAWI,
13596 IX86_BUILTIN_PSRADI,
13597 IX86_BUILTIN_PSRLWI,
13598 IX86_BUILTIN_PSRLDI,
13599 IX86_BUILTIN_PSRLQI,
13601 IX86_BUILTIN_PUNPCKHBW,
13602 IX86_BUILTIN_PUNPCKHWD,
13603 IX86_BUILTIN_PUNPCKHDQ,
13604 IX86_BUILTIN_PUNPCKLBW,
13605 IX86_BUILTIN_PUNPCKLWD,
13606 IX86_BUILTIN_PUNPCKLDQ,
13608 IX86_BUILTIN_SHUFPS,
13610 IX86_BUILTIN_RCPPS,
13611 IX86_BUILTIN_RCPSS,
13612 IX86_BUILTIN_RSQRTPS,
13613 IX86_BUILTIN_RSQRTSS,
13614 IX86_BUILTIN_SQRTPS,
13615 IX86_BUILTIN_SQRTSS,
13617 IX86_BUILTIN_UNPCKHPS,
13618 IX86_BUILTIN_UNPCKLPS,
13620 IX86_BUILTIN_ANDPS,
13621 IX86_BUILTIN_ANDNPS,
13623 IX86_BUILTIN_XORPS,
13626 IX86_BUILTIN_LDMXCSR,
13627 IX86_BUILTIN_STMXCSR,
13628 IX86_BUILTIN_SFENCE,
13630 /* 3DNow! Original */
13631 IX86_BUILTIN_FEMMS,
13632 IX86_BUILTIN_PAVGUSB,
13633 IX86_BUILTIN_PF2ID,
13634 IX86_BUILTIN_PFACC,
13635 IX86_BUILTIN_PFADD,
13636 IX86_BUILTIN_PFCMPEQ,
13637 IX86_BUILTIN_PFCMPGE,
13638 IX86_BUILTIN_PFCMPGT,
13639 IX86_BUILTIN_PFMAX,
13640 IX86_BUILTIN_PFMIN,
13641 IX86_BUILTIN_PFMUL,
13642 IX86_BUILTIN_PFRCP,
13643 IX86_BUILTIN_PFRCPIT1,
13644 IX86_BUILTIN_PFRCPIT2,
13645 IX86_BUILTIN_PFRSQIT1,
13646 IX86_BUILTIN_PFRSQRT,
13647 IX86_BUILTIN_PFSUB,
13648 IX86_BUILTIN_PFSUBR,
13649 IX86_BUILTIN_PI2FD,
13650 IX86_BUILTIN_PMULHRW,
13652 /* 3DNow! Athlon Extensions */
13653 IX86_BUILTIN_PF2IW,
13654 IX86_BUILTIN_PFNACC,
13655 IX86_BUILTIN_PFPNACC,
13656 IX86_BUILTIN_PI2FW,
13657 IX86_BUILTIN_PSWAPDSI,
13658 IX86_BUILTIN_PSWAPDSF,
13661 IX86_BUILTIN_ADDPD,
13662 IX86_BUILTIN_ADDSD,
13663 IX86_BUILTIN_DIVPD,
13664 IX86_BUILTIN_DIVSD,
13665 IX86_BUILTIN_MULPD,
13666 IX86_BUILTIN_MULSD,
13667 IX86_BUILTIN_SUBPD,
13668 IX86_BUILTIN_SUBSD,
13670 IX86_BUILTIN_CMPEQPD,
13671 IX86_BUILTIN_CMPLTPD,
13672 IX86_BUILTIN_CMPLEPD,
13673 IX86_BUILTIN_CMPGTPD,
13674 IX86_BUILTIN_CMPGEPD,
13675 IX86_BUILTIN_CMPNEQPD,
13676 IX86_BUILTIN_CMPNLTPD,
13677 IX86_BUILTIN_CMPNLEPD,
13678 IX86_BUILTIN_CMPNGTPD,
13679 IX86_BUILTIN_CMPNGEPD,
13680 IX86_BUILTIN_CMPORDPD,
13681 IX86_BUILTIN_CMPUNORDPD,
13682 IX86_BUILTIN_CMPNEPD,
13683 IX86_BUILTIN_CMPEQSD,
13684 IX86_BUILTIN_CMPLTSD,
13685 IX86_BUILTIN_CMPLESD,
13686 IX86_BUILTIN_CMPNEQSD,
13687 IX86_BUILTIN_CMPNLTSD,
13688 IX86_BUILTIN_CMPNLESD,
13689 IX86_BUILTIN_CMPORDSD,
13690 IX86_BUILTIN_CMPUNORDSD,
13691 IX86_BUILTIN_CMPNESD,
13693 IX86_BUILTIN_COMIEQSD,
13694 IX86_BUILTIN_COMILTSD,
13695 IX86_BUILTIN_COMILESD,
13696 IX86_BUILTIN_COMIGTSD,
13697 IX86_BUILTIN_COMIGESD,
13698 IX86_BUILTIN_COMINEQSD,
13699 IX86_BUILTIN_UCOMIEQSD,
13700 IX86_BUILTIN_UCOMILTSD,
13701 IX86_BUILTIN_UCOMILESD,
13702 IX86_BUILTIN_UCOMIGTSD,
13703 IX86_BUILTIN_UCOMIGESD,
13704 IX86_BUILTIN_UCOMINEQSD,
13706 IX86_BUILTIN_MAXPD,
13707 IX86_BUILTIN_MAXSD,
13708 IX86_BUILTIN_MINPD,
13709 IX86_BUILTIN_MINSD,
13711 IX86_BUILTIN_ANDPD,
13712 IX86_BUILTIN_ANDNPD,
13714 IX86_BUILTIN_XORPD,
13716 IX86_BUILTIN_SQRTPD,
13717 IX86_BUILTIN_SQRTSD,
13719 IX86_BUILTIN_UNPCKHPD,
13720 IX86_BUILTIN_UNPCKLPD,
13722 IX86_BUILTIN_SHUFPD,
13724 IX86_BUILTIN_LOADUPD,
13725 IX86_BUILTIN_STOREUPD,
13726 IX86_BUILTIN_MOVSD,
13728 IX86_BUILTIN_LOADHPD,
13729 IX86_BUILTIN_LOADLPD,
13731 IX86_BUILTIN_CVTDQ2PD,
13732 IX86_BUILTIN_CVTDQ2PS,
13734 IX86_BUILTIN_CVTPD2DQ,
13735 IX86_BUILTIN_CVTPD2PI,
13736 IX86_BUILTIN_CVTPD2PS,
13737 IX86_BUILTIN_CVTTPD2DQ,
13738 IX86_BUILTIN_CVTTPD2PI,
13740 IX86_BUILTIN_CVTPI2PD,
13741 IX86_BUILTIN_CVTSI2SD,
13742 IX86_BUILTIN_CVTSI642SD,
13744 IX86_BUILTIN_CVTSD2SI,
13745 IX86_BUILTIN_CVTSD2SI64,
13746 IX86_BUILTIN_CVTSD2SS,
13747 IX86_BUILTIN_CVTSS2SD,
13748 IX86_BUILTIN_CVTTSD2SI,
13749 IX86_BUILTIN_CVTTSD2SI64,
13751 IX86_BUILTIN_CVTPS2DQ,
13752 IX86_BUILTIN_CVTPS2PD,
13753 IX86_BUILTIN_CVTTPS2DQ,
13755 IX86_BUILTIN_MOVNTI,
13756 IX86_BUILTIN_MOVNTPD,
13757 IX86_BUILTIN_MOVNTDQ,
13760 IX86_BUILTIN_MASKMOVDQU,
13761 IX86_BUILTIN_MOVMSKPD,
13762 IX86_BUILTIN_PMOVMSKB128,
13764 IX86_BUILTIN_PACKSSWB128,
13765 IX86_BUILTIN_PACKSSDW128,
13766 IX86_BUILTIN_PACKUSWB128,
13768 IX86_BUILTIN_PADDB128,
13769 IX86_BUILTIN_PADDW128,
13770 IX86_BUILTIN_PADDD128,
13771 IX86_BUILTIN_PADDQ128,
13772 IX86_BUILTIN_PADDSB128,
13773 IX86_BUILTIN_PADDSW128,
13774 IX86_BUILTIN_PADDUSB128,
13775 IX86_BUILTIN_PADDUSW128,
13776 IX86_BUILTIN_PSUBB128,
13777 IX86_BUILTIN_PSUBW128,
13778 IX86_BUILTIN_PSUBD128,
13779 IX86_BUILTIN_PSUBQ128,
13780 IX86_BUILTIN_PSUBSB128,
13781 IX86_BUILTIN_PSUBSW128,
13782 IX86_BUILTIN_PSUBUSB128,
13783 IX86_BUILTIN_PSUBUSW128,
13785 IX86_BUILTIN_PAND128,
13786 IX86_BUILTIN_PANDN128,
13787 IX86_BUILTIN_POR128,
13788 IX86_BUILTIN_PXOR128,
13790 IX86_BUILTIN_PAVGB128,
13791 IX86_BUILTIN_PAVGW128,
13793 IX86_BUILTIN_PCMPEQB128,
13794 IX86_BUILTIN_PCMPEQW128,
13795 IX86_BUILTIN_PCMPEQD128,
13796 IX86_BUILTIN_PCMPGTB128,
13797 IX86_BUILTIN_PCMPGTW128,
13798 IX86_BUILTIN_PCMPGTD128,
13800 IX86_BUILTIN_PMADDWD128,
13802 IX86_BUILTIN_PMAXSW128,
13803 IX86_BUILTIN_PMAXUB128,
13804 IX86_BUILTIN_PMINSW128,
13805 IX86_BUILTIN_PMINUB128,
13807 IX86_BUILTIN_PMULUDQ,
13808 IX86_BUILTIN_PMULUDQ128,
13809 IX86_BUILTIN_PMULHUW128,
13810 IX86_BUILTIN_PMULHW128,
13811 IX86_BUILTIN_PMULLW128,
13813 IX86_BUILTIN_PSADBW128,
13814 IX86_BUILTIN_PSHUFHW,
13815 IX86_BUILTIN_PSHUFLW,
13816 IX86_BUILTIN_PSHUFD,
13818 IX86_BUILTIN_PSLLW128,
13819 IX86_BUILTIN_PSLLD128,
13820 IX86_BUILTIN_PSLLQ128,
13821 IX86_BUILTIN_PSRAW128,
13822 IX86_BUILTIN_PSRAD128,
13823 IX86_BUILTIN_PSRLW128,
13824 IX86_BUILTIN_PSRLD128,
13825 IX86_BUILTIN_PSRLQ128,
13826 IX86_BUILTIN_PSLLDQI128,
13827 IX86_BUILTIN_PSLLWI128,
13828 IX86_BUILTIN_PSLLDI128,
13829 IX86_BUILTIN_PSLLQI128,
13830 IX86_BUILTIN_PSRAWI128,
13831 IX86_BUILTIN_PSRADI128,
13832 IX86_BUILTIN_PSRLDQI128,
13833 IX86_BUILTIN_PSRLWI128,
13834 IX86_BUILTIN_PSRLDI128,
13835 IX86_BUILTIN_PSRLQI128,
13837 IX86_BUILTIN_PUNPCKHBW128,
13838 IX86_BUILTIN_PUNPCKHWD128,
13839 IX86_BUILTIN_PUNPCKHDQ128,
13840 IX86_BUILTIN_PUNPCKHQDQ128,
13841 IX86_BUILTIN_PUNPCKLBW128,
13842 IX86_BUILTIN_PUNPCKLWD128,
13843 IX86_BUILTIN_PUNPCKLDQ128,
13844 IX86_BUILTIN_PUNPCKLQDQ128,
13846 IX86_BUILTIN_CLFLUSH,
13847 IX86_BUILTIN_MFENCE,
13848 IX86_BUILTIN_LFENCE,
13850 /* Prescott New Instructions. */
13851 IX86_BUILTIN_ADDSUBPS,
13852 IX86_BUILTIN_HADDPS,
13853 IX86_BUILTIN_HSUBPS,
13854 IX86_BUILTIN_MOVSHDUP,
13855 IX86_BUILTIN_MOVSLDUP,
13856 IX86_BUILTIN_ADDSUBPD,
13857 IX86_BUILTIN_HADDPD,
13858 IX86_BUILTIN_HSUBPD,
13859 IX86_BUILTIN_LDDQU,
13861 IX86_BUILTIN_MONITOR,
13862 IX86_BUILTIN_MWAIT,
13864 IX86_BUILTIN_VEC_INIT_V2SI,
13865 IX86_BUILTIN_VEC_INIT_V4HI,
13866 IX86_BUILTIN_VEC_INIT_V8QI,
13867 IX86_BUILTIN_VEC_EXT_V2DF,
13868 IX86_BUILTIN_VEC_EXT_V2DI,
13869 IX86_BUILTIN_VEC_EXT_V4SF,
13870 IX86_BUILTIN_VEC_EXT_V4SI,
13871 IX86_BUILTIN_VEC_EXT_V8HI,
13872 IX86_BUILTIN_VEC_EXT_V2SI,
13873 IX86_BUILTIN_VEC_EXT_V4HI,
13874 IX86_BUILTIN_VEC_SET_V8HI,
13875 IX86_BUILTIN_VEC_SET_V4HI,
13877 IX86_BUILTIN_MAX
13878 };
13880 #define def_builtin(MASK, NAME, TYPE, CODE) \
13881 do { \
13882 if ((MASK) & target_flags \
13883 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
13884 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
13885 NULL, NULL_TREE); \
13886 } while (0)
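/* For example, given the macro above, a call such as
     def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void,
                  IX86_BUILTIN_EMMS);
   (which appears below) registers the builtin only when the MMX bit
   is set in target_flags, and builtins tagged with MASK_64BIT are
   additionally restricted to TARGET_64BIT. */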
13888 /* Bits for builtin_description.flag. */
13890 /* Set when we don't support the comparison natively, and should
13891 swap the comparison operands in order to support it. */
13892 #define BUILTIN_DESC_SWAP_OPERANDS 1
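/* For example, SSE has no native GT compare, so __builtin_ia32_cmpgtps
   is described below as LT with BUILTIN_DESC_SWAP_OPERANDS set,
   relying on a > b being equivalent to b < a. */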
13894 struct builtin_description
13895 {
13896 const unsigned int mask; /* Target flag bits (MASK_*) required. */
13897 const enum insn_code icode; /* Insn pattern that implements it. */
13898 const char *const name; /* Builtin name, or 0 if registered by hand. */
13899 const enum ix86_builtins code; /* Function code from the enum above. */
13900 const enum rtx_code comparison; /* Comparison RTX code, if any. */
13901 const unsigned int flag; /* BUILTIN_DESC_* bits. */
13902 };
13904 static const struct builtin_description bdesc_comi[] =
13905 {
13906 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
13907 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
13908 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
13909 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
13910 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
13911 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
13912 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
13913 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
13914 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
13915 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
13916 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
13917 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
13918 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
13919 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
13920 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
13921 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
13922 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
13923 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
13924 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
13925 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
13926 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
13927 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
13928 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
13929 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
13930 };
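/* Note the comparison codes above: comiss/ucomiss set ZF both for
   "equal" and for "unordered" operands, so the *eq builtins map to
   UNEQ and the *neq builtins to LTGT rather than plain EQ/NE. */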
13932 static const struct builtin_description bdesc_2arg[] =
13933 {
13934 /* SSE */
13935 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
13936 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
13937 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
13938 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
13939 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
13940 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
13941 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
13942 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
13944 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
13945 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
13946 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
13947 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
13948 BUILTIN_DESC_SWAP_OPERANDS },
13949 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
13950 BUILTIN_DESC_SWAP_OPERANDS },
13951 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
13952 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
13953 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
13954 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
13955 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
13956 BUILTIN_DESC_SWAP_OPERANDS },
13957 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
13958 BUILTIN_DESC_SWAP_OPERANDS },
13959 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
13960 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
13961 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
13962 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
13963 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
13964 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
13965 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
13966 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
13967 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
13968 BUILTIN_DESC_SWAP_OPERANDS },
13969 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
13970 BUILTIN_DESC_SWAP_OPERANDS },
13971 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
13973 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
13974 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
13975 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
13976 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
13978 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
13979 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
13980 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
13981 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
13983 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
13984 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
13985 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
13986 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
13987 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
13989 /* MMX */
13990 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
13991 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
13992 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
13993 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
13994 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
13995 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
13996 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
13997 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
13999 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
14000 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
14001 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
14002 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
14003 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
14004 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
14005 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
14006 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
14008 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
14009 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
14010 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
14012 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
14013 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
14014 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
14015 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
14017 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
14018 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
14020 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
14021 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
14022 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
14023 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
14024 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
14025 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
14027 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
14028 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
14029 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
14030 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
14032 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
14033 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
14034 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
14035 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
14036 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
14037 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
14040 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
14041 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
14042 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
14044 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
14045 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
14046 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
14048 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
14049 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
14050 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
14051 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
14052 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
14053 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
14055 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
14056 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
14057 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
14058 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
14059 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
14060 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
14062 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
14063 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
14064 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
14065 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
14067 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
14068 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
14070 /* SSE2 */
14071 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
14072 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
14073 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
14074 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
14075 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
14076 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
14077 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
14078 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
14080 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
14081 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
14082 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
14083 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
14084 BUILTIN_DESC_SWAP_OPERANDS },
14085 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
14086 BUILTIN_DESC_SWAP_OPERANDS },
14087 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
14088 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
14089 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
14090 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
14091 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
14092 BUILTIN_DESC_SWAP_OPERANDS },
14093 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
14094 BUILTIN_DESC_SWAP_OPERANDS },
14095 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
14096 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
14097 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
14098 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
14099 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
14100 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
14101 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
14102 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
14103 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
14105 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
14106 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
14107 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
14108 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
14110 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
14111 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
14112 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
14113 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
14115 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
14116 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
14117 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
14120 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
14121 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
14122 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
14123 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
14124 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
14125 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
14126 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
14127 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
14129 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
14130 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
14131 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
14132 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
14133 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
14134 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
14135 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
14136 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
14138 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
14139 { MASK_SSE2, CODE_FOR_sse2_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
14141 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
14142 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
14143 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
14144 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
14146 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
14147 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
14149 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
14150 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
14151 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
14152 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
14153 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
14154 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
14156 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
14157 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
14158 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
14159 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
14161 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
14162 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
14163 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
14164 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
14165 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
14166 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
14167 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
14168 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
14170 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
14171 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
14172 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
14174 { MASK_SSE2, CODE_FOR_sse2_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
14175 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
14177 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
14178 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
14180 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
14181 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
14182 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
14184 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
14185 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
14186 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
14188 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
14189 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
14191 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
14193 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
14194 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
14195 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
14196 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
14198 /* SSE3 */
14199 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
14200 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
14201 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
14202 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
14203 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
14204 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
14205 };
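/* Entries above with a null name are skipped by the generic
   registration loop in ix86_init_mmx_sse_builtins below; their
   user-visible builtins are created by explicit def_builtin calls
   with hand-written types (e.g. the MMX shift builtins). */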
14207 static const struct builtin_description bdesc_1arg[] =
14208 {
14209 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
14210 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
14212 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
14213 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
14214 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
14216 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
14217 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
14218 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
14219 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
14220 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
14221 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
14223 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
14224 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
14226 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
14228 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
14229 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
14231 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
14232 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
14233 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
14234 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
14235 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
14237 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
14239 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
14240 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
14241 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
14242 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
14244 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
14245 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
14246 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
14248 /* SSE3 */
14249 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
14250 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
14251 };
14253 static void
14254 ix86_init_builtins (void)
14255 {
14256 if (TARGET_MMX)
14257 ix86_init_mmx_sse_builtins ();
14258 }
14260 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
14261 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
14262 part of the builtins. */
14263 static void
14264 ix86_init_mmx_sse_builtins (void)
14265 {
14266 const struct builtin_description * d;
14267 size_t i;
14269 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
14270 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
14271 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
14272 tree V2DI_type_node
14273 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
14274 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
14275 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
14276 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
14277 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
14278 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
14279 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
14281 tree pchar_type_node = build_pointer_type (char_type_node);
14282 tree pcchar_type_node = build_pointer_type (
14283 build_type_variant (char_type_node, 1, 0));
14284 tree pfloat_type_node = build_pointer_type (float_type_node);
14285 tree pcfloat_type_node = build_pointer_type (
14286 build_type_variant (float_type_node, 1, 0));
14287 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
14288 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
14289 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
14291 /* Comparisons. */
14292 tree int_ftype_v4sf_v4sf
14293 = build_function_type_list (integer_type_node,
14294 V4SF_type_node, V4SF_type_node, NULL_TREE);
14295 tree v4si_ftype_v4sf_v4sf
14296 = build_function_type_list (V4SI_type_node,
14297 V4SF_type_node, V4SF_type_node, NULL_TREE);
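/* Naming convention: the locals read <result>_ftype_<arg1>_<arg2>...,
   so v4si_ftype_v4sf_v4sf above denotes "V4SI (*) (V4SF, V4SF)". */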
14298 /* MMX/SSE/integer conversions. */
14299 tree int_ftype_v4sf
14300 = build_function_type_list (integer_type_node,
14301 V4SF_type_node, NULL_TREE);
14302 tree int64_ftype_v4sf
14303 = build_function_type_list (long_long_integer_type_node,
14304 V4SF_type_node, NULL_TREE);
14305 tree int_ftype_v8qi
14306 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
14307 tree v4sf_ftype_v4sf_int
14308 = build_function_type_list (V4SF_type_node,
14309 V4SF_type_node, integer_type_node, NULL_TREE);
14310 tree v4sf_ftype_v4sf_int64
14311 = build_function_type_list (V4SF_type_node,
14312 V4SF_type_node, long_long_integer_type_node,
14313 NULL_TREE);
14314 tree v4sf_ftype_v4sf_v2si
14315 = build_function_type_list (V4SF_type_node,
14316 V4SF_type_node, V2SI_type_node, NULL_TREE);
14318 /* Miscellaneous. */
14319 tree v8qi_ftype_v4hi_v4hi
14320 = build_function_type_list (V8QI_type_node,
14321 V4HI_type_node, V4HI_type_node, NULL_TREE);
14322 tree v4hi_ftype_v2si_v2si
14323 = build_function_type_list (V4HI_type_node,
14324 V2SI_type_node, V2SI_type_node, NULL_TREE);
14325 tree v4sf_ftype_v4sf_v4sf_int
14326 = build_function_type_list (V4SF_type_node,
14327 V4SF_type_node, V4SF_type_node,
14328 integer_type_node, NULL_TREE);
14329 tree v2si_ftype_v4hi_v4hi
14330 = build_function_type_list (V2SI_type_node,
14331 V4HI_type_node, V4HI_type_node, NULL_TREE);
14332 tree v4hi_ftype_v4hi_int
14333 = build_function_type_list (V4HI_type_node,
14334 V4HI_type_node, integer_type_node, NULL_TREE);
14335 tree v4hi_ftype_v4hi_di
14336 = build_function_type_list (V4HI_type_node,
14337 V4HI_type_node, long_long_unsigned_type_node,
14338 NULL_TREE);
14339 tree v2si_ftype_v2si_di
14340 = build_function_type_list (V2SI_type_node,
14341 V2SI_type_node, long_long_unsigned_type_node,
14342 NULL_TREE);
14343 tree void_ftype_void
14344 = build_function_type (void_type_node, void_list_node);
14345 tree void_ftype_unsigned
14346 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
14347 tree void_ftype_unsigned_unsigned
14348 = build_function_type_list (void_type_node, unsigned_type_node,
14349 unsigned_type_node, NULL_TREE);
14350 tree void_ftype_pcvoid_unsigned_unsigned
14351 = build_function_type_list (void_type_node, const_ptr_type_node,
14352 unsigned_type_node, unsigned_type_node,
14353 NULL_TREE);
14354 tree unsigned_ftype_void
14355 = build_function_type (unsigned_type_node, void_list_node);
14356 tree v2si_ftype_v4sf
14357 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
14358 /* Loads/stores. */
14359 tree void_ftype_v8qi_v8qi_pchar
14360 = build_function_type_list (void_type_node,
14361 V8QI_type_node, V8QI_type_node,
14362 pchar_type_node, NULL_TREE);
14363 tree v4sf_ftype_pcfloat
14364 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
14365 /* @@@ the type is bogus */
14366 tree v4sf_ftype_v4sf_pv2si
14367 = build_function_type_list (V4SF_type_node,
14368 V4SF_type_node, pv2si_type_node, NULL_TREE);
14369 tree void_ftype_pv2si_v4sf
14370 = build_function_type_list (void_type_node,
14371 pv2si_type_node, V4SF_type_node, NULL_TREE);
14372 tree void_ftype_pfloat_v4sf
14373 = build_function_type_list (void_type_node,
14374 pfloat_type_node, V4SF_type_node, NULL_TREE);
14375 tree void_ftype_pdi_di
14376 = build_function_type_list (void_type_node,
14377 pdi_type_node, long_long_unsigned_type_node,
14378 NULL_TREE);
14379 tree void_ftype_pv2di_v2di
14380 = build_function_type_list (void_type_node,
14381 pv2di_type_node, V2DI_type_node, NULL_TREE);
14382 /* Normal vector unops. */
14383 tree v4sf_ftype_v4sf
14384 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
14386 /* Normal vector binops. */
14387 tree v4sf_ftype_v4sf_v4sf
14388 = build_function_type_list (V4SF_type_node,
14389 V4SF_type_node, V4SF_type_node, NULL_TREE);
14390 tree v8qi_ftype_v8qi_v8qi
14391 = build_function_type_list (V8QI_type_node,
14392 V8QI_type_node, V8QI_type_node, NULL_TREE);
14393 tree v4hi_ftype_v4hi_v4hi
14394 = build_function_type_list (V4HI_type_node,
14395 V4HI_type_node, V4HI_type_node, NULL_TREE);
14396 tree v2si_ftype_v2si_v2si
14397 = build_function_type_list (V2SI_type_node,
14398 V2SI_type_node, V2SI_type_node, NULL_TREE);
14399 tree di_ftype_di_di
14400 = build_function_type_list (long_long_unsigned_type_node,
14401 long_long_unsigned_type_node,
14402 long_long_unsigned_type_node, NULL_TREE);
14404 tree v2si_ftype_v2sf
14405 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
14406 tree v2sf_ftype_v2si
14407 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
14408 tree v2si_ftype_v2si
14409 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
14410 tree v2sf_ftype_v2sf
14411 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
14412 tree v2sf_ftype_v2sf_v2sf
14413 = build_function_type_list (V2SF_type_node,
14414 V2SF_type_node, V2SF_type_node, NULL_TREE);
14415 tree v2si_ftype_v2sf_v2sf
14416 = build_function_type_list (V2SI_type_node,
14417 V2SF_type_node, V2SF_type_node, NULL_TREE);
14418 tree pint_type_node = build_pointer_type (integer_type_node);
14419 tree pdouble_type_node = build_pointer_type (double_type_node);
14420 tree pcdouble_type_node = build_pointer_type (
14421 build_type_variant (double_type_node, 1, 0));
14422 tree int_ftype_v2df_v2df
14423 = build_function_type_list (integer_type_node,
14424 V2DF_type_node, V2DF_type_node, NULL_TREE);
14426 tree ti_ftype_ti_ti
14427 = build_function_type_list (intTI_type_node,
14428 intTI_type_node, intTI_type_node, NULL_TREE);
14429 tree void_ftype_pcvoid
14430 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
14431 tree v4sf_ftype_v4si
14432 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
14433 tree v4si_ftype_v4sf
14434 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
14435 tree v2df_ftype_v4si
14436 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
14437 tree v4si_ftype_v2df
14438 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
14439 tree v2si_ftype_v2df
14440 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
14441 tree v4sf_ftype_v2df
14442 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
14443 tree v2df_ftype_v2si
14444 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
14445 tree v2df_ftype_v4sf
14446 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
14447 tree int_ftype_v2df
14448 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
14449 tree int64_ftype_v2df
14450 = build_function_type_list (long_long_integer_type_node,
14451 V2DF_type_node, NULL_TREE);
14452 tree v2df_ftype_v2df_int
14453 = build_function_type_list (V2DF_type_node,
14454 V2DF_type_node, integer_type_node, NULL_TREE);
14455 tree v2df_ftype_v2df_int64
14456 = build_function_type_list (V2DF_type_node,
14457 V2DF_type_node, long_long_integer_type_node,
14458 NULL_TREE);
14459 tree v4sf_ftype_v4sf_v2df
14460 = build_function_type_list (V4SF_type_node,
14461 V4SF_type_node, V2DF_type_node, NULL_TREE);
14462 tree v2df_ftype_v2df_v4sf
14463 = build_function_type_list (V2DF_type_node,
14464 V2DF_type_node, V4SF_type_node, NULL_TREE);
14465 tree v2df_ftype_v2df_v2df_int
14466 = build_function_type_list (V2DF_type_node,
14467 V2DF_type_node, V2DF_type_node,
14468 integer_type_node, NULL_TREE);
14470 tree v2df_ftype_v2df_pcdouble
14471 = build_function_type_list (V2DF_type_node,
14472 V2DF_type_node, pcdouble_type_node, NULL_TREE);
14473 tree void_ftype_pdouble_v2df
14474 = build_function_type_list (void_type_node,
14475 pdouble_type_node, V2DF_type_node, NULL_TREE);
14476 tree void_ftype_pint_int
14477 = build_function_type_list (void_type_node,
14478 pint_type_node, integer_type_node, NULL_TREE);
14479 tree void_ftype_v16qi_v16qi_pchar
14480 = build_function_type_list (void_type_node,
14481 V16QI_type_node, V16QI_type_node,
14482 pchar_type_node, NULL_TREE);
14483 tree v2df_ftype_pcdouble
14484 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
14485 tree v2df_ftype_v2df_v2df
14486 = build_function_type_list (V2DF_type_node,
14487 V2DF_type_node, V2DF_type_node, NULL_TREE);
14488 tree v16qi_ftype_v16qi_v16qi
14489 = build_function_type_list (V16QI_type_node,
14490 V16QI_type_node, V16QI_type_node, NULL_TREE);
14491 tree v8hi_ftype_v8hi_v8hi
14492 = build_function_type_list (V8HI_type_node,
14493 V8HI_type_node, V8HI_type_node, NULL_TREE);
14494 tree v4si_ftype_v4si_v4si
14495 = build_function_type_list (V4SI_type_node,
14496 V4SI_type_node, V4SI_type_node, NULL_TREE);
14497 tree v2di_ftype_v2di_v2di
14498 = build_function_type_list (V2DI_type_node,
14499 V2DI_type_node, V2DI_type_node, NULL_TREE);
14500 tree v2di_ftype_v2df_v2df
14501 = build_function_type_list (V2DI_type_node,
14502 V2DF_type_node, V2DF_type_node, NULL_TREE);
14503 tree v2df_ftype_v2df
14504 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
14505 tree v2di_ftype_v2di_int
14506 = build_function_type_list (V2DI_type_node,
14507 V2DI_type_node, integer_type_node, NULL_TREE);
14508 tree v4si_ftype_v4si_int
14509 = build_function_type_list (V4SI_type_node,
14510 V4SI_type_node, integer_type_node, NULL_TREE);
14511 tree v8hi_ftype_v8hi_int
14512 = build_function_type_list (V8HI_type_node,
14513 V8HI_type_node, integer_type_node, NULL_TREE);
14514 tree v8hi_ftype_v8hi_v2di
14515 = build_function_type_list (V8HI_type_node,
14516 V8HI_type_node, V2DI_type_node, NULL_TREE);
14517 tree v4si_ftype_v4si_v2di
14518 = build_function_type_list (V4SI_type_node,
14519 V4SI_type_node, V2DI_type_node, NULL_TREE);
14520 tree v4si_ftype_v8hi_v8hi
14521 = build_function_type_list (V4SI_type_node,
14522 V8HI_type_node, V8HI_type_node, NULL_TREE);
14523 tree di_ftype_v8qi_v8qi
14524 = build_function_type_list (long_long_unsigned_type_node,
14525 V8QI_type_node, V8QI_type_node, NULL_TREE);
14526 tree di_ftype_v2si_v2si
14527 = build_function_type_list (long_long_unsigned_type_node,
14528 V2SI_type_node, V2SI_type_node, NULL_TREE);
14529 tree v2di_ftype_v16qi_v16qi
14530 = build_function_type_list (V2DI_type_node,
14531 V16QI_type_node, V16QI_type_node, NULL_TREE);
14532 tree v2di_ftype_v4si_v4si
14533 = build_function_type_list (V2DI_type_node,
14534 V4SI_type_node, V4SI_type_node, NULL_TREE);
14535 tree int_ftype_v16qi
14536 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
14537 tree v16qi_ftype_pcchar
14538 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
14539 tree void_ftype_pchar_v16qi
14540 = build_function_type_list (void_type_node,
14541 pchar_type_node, V16QI_type_node, NULL_TREE);
14543 tree float80_type;
14544 tree float128_type;
14547 /* The __float80 type. */
14548 if (TYPE_MODE (long_double_type_node) == XFmode)
14549 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
14550 "__float80");
14551 else
14552 {
14553 /* The __float80 type. */
14554 float80_type = make_node (REAL_TYPE);
14555 TYPE_PRECISION (float80_type) = 80;
14556 layout_type (float80_type);
14557 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
14558 }
14560 float128_type = make_node (REAL_TYPE);
14561 TYPE_PRECISION (float128_type) = 128;
14562 layout_type (float128_type);
14563 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
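/* Once registered, the names are directly usable from user code; a
   hypothetical use would be "__float80 x;" or "__float128 y;", which
   declare 80- and 128-bit-precision REAL_TYPE variables. */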
14565 /* Add all builtins that are more or less simple operations on two
14566 operands. */
14567 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14568 {
14569 /* Use one of the operands; the target can have a different mode for
14570 mask-generating compares. */
14571 enum machine_mode mode;
14572 tree type;
14574 if (d->name == 0)
14575 continue;
14576 mode = insn_data[d->icode].operand[1].mode;
14578 switch (mode)
14579 {
14580 case V16QImode:
14581 type = v16qi_ftype_v16qi_v16qi;
14582 break;
14583 case V8HImode:
14584 type = v8hi_ftype_v8hi_v8hi;
14585 break;
14586 case V4SImode:
14587 type = v4si_ftype_v4si_v4si;
14588 break;
14589 case V2DImode:
14590 type = v2di_ftype_v2di_v2di;
14591 break;
14592 case V2DFmode:
14593 type = v2df_ftype_v2df_v2df;
14594 break;
14595 case TImode:
14596 type = ti_ftype_ti_ti;
14597 break;
14598 case V4SFmode:
14599 type = v4sf_ftype_v4sf_v4sf;
14600 break;
14601 case V8QImode:
14602 type = v8qi_ftype_v8qi_v8qi;
14603 break;
14604 case V4HImode:
14605 type = v4hi_ftype_v4hi_v4hi;
14606 break;
14607 case V2SImode:
14608 type = v2si_ftype_v2si_v2si;
14609 break;
14610 case DImode:
14611 type = di_ftype_di_di;
14612 break;
14614 default:
14615 gcc_unreachable ();
14616 }
14618 /* Override for comparisons. */
14619 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
14620 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
14621 type = v4si_ftype_v4sf_v4sf;
14623 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
14624 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
14625 type = v2di_ftype_v2df_v2df;
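/* (The mask-generating compares return elements of all-ones or
   all-zeros bits, hence the integer vector result types above.) */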
14627 def_builtin (d->mask, d->name, type, d->code);
14628 }
14630 /* Add the remaining MMX insns with somewhat more complicated types. */
14631 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
14632 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
14633 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
14634 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
14636 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
14637 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
14638 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
14640 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
14641 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
14643 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
14644 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
14646 /* comi/ucomi insns. */
14647 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
14648 if (d->mask == MASK_SSE2)
14649 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
14650 else
14651 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
14653 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
14654 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
14655 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
14657 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
14658 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
14659 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
14660 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
14661 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
14662 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
14663 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
14664 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
14665 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
14666 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
14667 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
14669 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
14671 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
14672 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
14674 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
14675 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
14676 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
14677 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
14679 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
14680 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
14681 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
14682 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
14684 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
14686 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
14688 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
14689 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
14690 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
14691 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
14692 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
14693 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
14695 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
14697 /* Original 3DNow! */
14698 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
14699 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
14700 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
14701 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
14702 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
14703 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
14704 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
14705 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
14706 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
14707 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
14708 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
14709 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
14710 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
14711 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
14712 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
14713 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
14714 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
14715 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
14716 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
14717 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
14719 /* 3DNow! extension as used in the Athlon CPU. */
14720 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
14721 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
14722 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
14723 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
14724 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
14725 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
14728 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
14730 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
14731 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
14733 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
14734 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
14736 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
14737 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
14738 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
14739 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
14740 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
14742 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
14743 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
14744 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
14745 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
14747 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
14748 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
14750 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
14752 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
14753 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
14755 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
14756 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
14757 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
14758 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
14759 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
14761 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
14763 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
14764 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
14765 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
14766 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
14768 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
14769 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
14770 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
14772 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
14773 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
14774 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
14775 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
14777 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
14778 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
14779 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
14781 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
14782 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
14784 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
14785 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
14787 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
14788 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
14789 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
14791 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
14792 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
14793 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
14795 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
14796 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
14798 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
14799 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
14800 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
14801 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
14803 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
14804 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
14805 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
14806 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
14808 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
14809 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
14811 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
14813 /* Prescott New Instructions. */
14814 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
14815 void_ftype_pcvoid_unsigned_unsigned,
14816 IX86_BUILTIN_MONITOR);
14817 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
14818 void_ftype_unsigned_unsigned,
14819 IX86_BUILTIN_MWAIT);
14820 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
14821 v4sf_ftype_v4sf,
14822 IX86_BUILTIN_MOVSHDUP);
14823 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
14824 v4sf_ftype_v4sf,
14825 IX86_BUILTIN_MOVSLDUP);
14826 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
14827 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
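/* Added illustration (not part of the original source): each def_builtin
   above is the target half of a <*mmintrin.h> wrapper.  A pmmintrin.h-style
   sketch, with names shown for illustration rather than quoted from the
   header:

     static __inline __m128i
     _mm_lddqu_si128 (__m128i const *__P)
     {
       return (__m128i) __builtin_ia32_lddqu ((char const *) __P);
     }

   so MASK_SSE3 gates the builtin exactly as -msse3 gates the intrinsic.  */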
14829 /* Access to the vec_init patterns. */
14830 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
14831 integer_type_node, NULL_TREE);
14832 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
14833 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
14835 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
14836 short_integer_type_node,
14837 short_integer_type_node,
14838 short_integer_type_node, NULL_TREE);
14839 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
14840 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
14842 ftype = build_function_type_list (V8QI_type_node, char_type_node,
14843 char_type_node, char_type_node,
14844 char_type_node, char_type_node,
14845 char_type_node, char_type_node,
14846 char_type_node, NULL_TREE);
14847 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
14848 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
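/* Added illustration: the vec_init builtins exist so that mmintrin.h can
   build MMX vectors without vector-mode syntax (see the comment before
   ix86_expand_vec_init_builtin below).  A sketch of such a wrapper, names
   illustrative only:

     static __inline __m64
     _mm_set_pi32 (int __i1, int __i0)
     {
       return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
     }
*/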
14850 /* Access to the vec_extract patterns. */
14851 ftype = build_function_type_list (double_type_node, V2DF_type_node,
14852 integer_type_node, NULL_TREE);
14853 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
14854 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
14856 ftype = build_function_type_list (long_long_integer_type_node,
14857 V2DI_type_node, integer_type_node,
14858 NULL_TREE);
14859 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
14860 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
14862 ftype = build_function_type_list (float_type_node, V4SF_type_node,
14863 integer_type_node, NULL_TREE);
14864 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
14865 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
14867 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
14868 integer_type_node, NULL_TREE);
14869 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
14870 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
14872 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
14873 integer_type_node, NULL_TREE);
14874 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
14875 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
14877 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
14878 integer_type_node, NULL_TREE);
14879 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
14880 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
14882 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
14883 integer_type_node, NULL_TREE);
14884 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
14885 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
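/* Added illustration: the vec_ext builtins are wrapped the same way; a
   hypothetical sketch:

     #define _mm_extract_pi16(A, N) \
       ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N)))

   The selector must be a compile-time constant; get_element_number below
   enforces this.  */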
14887 /* Access to the vec_set patterns. */
14888 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
14889 intHI_type_node,
14890 integer_type_node, NULL_TREE);
14891 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
14892 ftype, IX86_BUILTIN_VEC_SET_V8HI);
14894 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
14895 intHI_type_node,
14896 integer_type_node, NULL_TREE);
14897 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
14898 ftype, IX86_BUILTIN_VEC_SET_V4HI);
14899 }
14901 /* Errors in the source file can cause expand_expr to return const0_rtx
14902 where we expect a vector. To avoid crashing, use one of the vector
14903 clear instructions. */
14904 static rtx
14905 safe_vector_operand (rtx x, enum machine_mode mode)
14906 {
14907 if (x == const0_rtx)
14908 x = CONST0_RTX (mode);
14909 return x;
14910 }
14912 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
14914 static rtx
14915 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
14916 {
14917 rtx pat, xops[3];
14918 tree arg0 = TREE_VALUE (arglist);
14919 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14920 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14921 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14922 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14923 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14924 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
14926 if (VECTOR_MODE_P (mode0))
14927 op0 = safe_vector_operand (op0, mode0);
14928 if (VECTOR_MODE_P (mode1))
14929 op1 = safe_vector_operand (op1, mode1);
14931 if (optimize || !target
14932 || GET_MODE (target) != tmode
14933 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14934 target = gen_reg_rtx (tmode);
14936 if (GET_MODE (op1) == SImode && mode1 == TImode)
14937 {
14938 rtx x = gen_reg_rtx (V4SImode);
14939 emit_insn (gen_sse2_loadd (x, op1));
14940 op1 = gen_lowpart (TImode, x);
14941 }
14943 /* The insn must want input operands in the same modes as the
14944 result.  */
14945 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
14946 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
14948 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
14949 op0 = copy_to_mode_reg (mode0, op0);
14950 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
14951 op1 = copy_to_mode_reg (mode1, op1);
14953 /* ??? Using ix86_fixup_binary_operands is problematic when
14954 we've got mismatched modes.  Fake it.  */
14956 xops[0] = target;
14957 xops[1] = op0;
14958 xops[2] = op1;
14960 if (tmode == mode0 && tmode == mode1)
14961 {
14962 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
14963 op0 = xops[1];
14964 op1 = xops[2];
14965 }
14966 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
14967 {
14968 op0 = force_reg (mode0, op0);
14969 op1 = force_reg (mode1, op1);
14970 target = gen_reg_rtx (tmode);
14971 }
14973 pat = GEN_FCN (icode) (target, op0, op1);
14974 if (! pat)
14975 return 0;
14976 emit_insn (pat);
14977 return target;
14978 }
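/* Added illustration (not in the original source): for a typical
   two-operand builtin such as __builtin_ia32_paddw, the table walk at the
   bottom of ix86_expand_builtin calls this routine with an icode such as
   CODE_FOR_mmx_addv4hi3 (exact name per mmx.md), and the net effect is
   roughly

     target = gen_reg_rtx (V4HImode);
     emit_insn (gen_mmx_addv4hi3 (target, op0, op1));

   after both operands have been forced into forms accepted by the insn's
   operand predicates.  */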
14980 /* Subroutine of ix86_expand_builtin to take care of stores. */
14982 static rtx
14983 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
14984 {
14985 rtx pat;
14986 tree arg0 = TREE_VALUE (arglist);
14987 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14988 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14989 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14990 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
14991 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
14993 if (VECTOR_MODE_P (mode1))
14994 op1 = safe_vector_operand (op1, mode1);
14996 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14997 op1 = copy_to_mode_reg (mode1, op1);
14999 pat = GEN_FCN (icode) (op0, op1);
15000 if (pat)
15001 emit_insn (pat);
15002 return 0;
15003 }
15005 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
15007 static rtx
15008 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
15009 rtx target, int do_load)
15010 {
15011 rtx pat;
15012 tree arg0 = TREE_VALUE (arglist);
15013 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15014 enum machine_mode tmode = insn_data[icode].operand[0].mode;
15015 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
15017 if (optimize || !target
15018 || GET_MODE (target) != tmode
15019 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15020 target = gen_reg_rtx (tmode);
15021 if (do_load)
15022 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15023 else
15024 {
15025 if (VECTOR_MODE_P (mode0))
15026 op0 = safe_vector_operand (op0, mode0);
15028 if ((optimize && !register_operand (op0, mode0))
15029 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15030 op0 = copy_to_mode_reg (mode0, op0);
15031 }
15033 pat = GEN_FCN (icode) (target, op0);
15034 if (! pat)
15035 return 0;
15036 emit_insn (pat);
15037 return target;
15038 }
15040 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
15041 sqrtss, rsqrtss, rcpss. */
15043 static rtx
15044 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
15045 {
15046 rtx pat;
15047 tree arg0 = TREE_VALUE (arglist);
15048 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15049 enum machine_mode tmode = insn_data[icode].operand[0].mode;
15050 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
15052 if (optimize || !target
15053 || GET_MODE (target) != tmode
15054 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15055 target = gen_reg_rtx (tmode);
15057 if (VECTOR_MODE_P (mode0))
15058 op0 = safe_vector_operand (op0, mode0);
15060 if ((optimize && !register_operand (op0, mode0))
15061 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15062 op0 = copy_to_mode_reg (mode0, op0);
15064 op1 = op0;
15065 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
15066 op1 = copy_to_mode_reg (mode0, op1);
15068 pat = GEN_FCN (icode) (target, op0, op1);
15069 if (! pat)
15070 return 0;
15071 emit_insn (pat);
15072 return target;
15073 }
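/* Added note: the three insns handled above are scalar operations on the
   low element of a V4SF value, and their patterns take a second input that
   supplies the untouched upper elements.  Passing op1 = op0 (restored
   above) makes those upper elements come from the one user-visible
   operand.  */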
15075 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
15077 static rtx
15078 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
15079 rtx target)
15080 {
15081 rtx pat;
15082 tree arg0 = TREE_VALUE (arglist);
15083 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15084 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15085 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15086 rtx op2;
15087 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
15088 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
15089 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
15090 enum rtx_code comparison = d->comparison;
15092 if (VECTOR_MODE_P (mode0))
15093 op0 = safe_vector_operand (op0, mode0);
15094 if (VECTOR_MODE_P (mode1))
15095 op1 = safe_vector_operand (op1, mode1);
15097 /* Swap operands if we have a comparison that isn't available in
15098 hardware.  */
15099 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
15100 {
15101 rtx tmp = gen_reg_rtx (mode1);
15102 emit_move_insn (tmp, op1);
15103 op1 = op0;
15104 op0 = tmp;
15105 }
15107 if (optimize || !target
15108 || GET_MODE (target) != tmode
15109 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
15110 target = gen_reg_rtx (tmode);
15112 if ((optimize && !register_operand (op0, mode0))
15113 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
15114 op0 = copy_to_mode_reg (mode0, op0);
15115 if ((optimize && !register_operand (op1, mode1))
15116 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
15117 op1 = copy_to_mode_reg (mode1, op1);
15119 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
15120 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
15121 if (! pat)
15122 return 0;
15123 emit_insn (pat);
15124 return target;
15125 }
15127 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
15129 static rtx
15130 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
15131 rtx target)
15132 {
15133 rtx pat;
15134 tree arg0 = TREE_VALUE (arglist);
15135 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15136 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15137 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15138 rtx op2;
15139 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
15140 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
15141 enum rtx_code comparison = d->comparison;
15143 if (VECTOR_MODE_P (mode0))
15144 op0 = safe_vector_operand (op0, mode0);
15145 if (VECTOR_MODE_P (mode1))
15146 op1 = safe_vector_operand (op1, mode1);
15148 /* Swap operands if we have a comparison that isn't available in
15149 hardware.  */
15150 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
15151 {
15152 rtx tmp = op1;
15153 op1 = op0;
15154 op0 = tmp;
15155 }
15157 target = gen_reg_rtx (SImode);
15158 emit_move_insn (target, const0_rtx);
15159 target = gen_rtx_SUBREG (QImode, target, 0);
15161 if ((optimize && !register_operand (op0, mode0))
15162 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15163 op0 = copy_to_mode_reg (mode0, op0);
15164 if ((optimize && !register_operand (op1, mode1))
15165 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15166 op1 = copy_to_mode_reg (mode1, op1);
15168 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
15169 pat = GEN_FCN (d->icode) (op0, op1);
15170 if (! pat)
15171 return 0;
15172 emit_insn (pat);
15173 emit_insn (gen_rtx_SET (VOIDmode,
15174 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
15175 gen_rtx_fmt_ee (comparison, QImode,
15176 SET_DEST (pat),
15177 const0_rtx)));
15179 return SUBREG_REG (target);
15180 }
15182 /* Return the integer constant in ARG. Constrain it to be in the range
15183 of the subparts of VEC_TYPE; issue an error if not. */
15185 static unsigned HOST_WIDE_INT
15186 get_element_number (tree vec_type, tree arg)
15187 {
15188 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15190 if (!host_integerp (arg, 1)
15191 || (elt = tree_low_cst (arg, 1), elt > max))
15193 error ("selector must be an integer constant in the range 0..%wi", max);
15200 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15201 ix86_expand_vector_init. We DO have language-level syntax for this, in
15202 the form of (type){ init-list }. Except that since we can't place emms
15203 instructions from inside the compiler, we can't allow the use of MMX
15204 registers unless the user explicitly asks for it. So we do *not* define
15205 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
15206 we have builtins invoked by mmintrin.h that give us license to emit
15207 these sorts of instructions. */
15209 static rtx
15210 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
15211 {
15212 enum machine_mode tmode = TYPE_MODE (type);
15213 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
15214 int i, n_elt = GET_MODE_NUNITS (tmode);
15215 rtvec v = rtvec_alloc (n_elt);
15217 gcc_assert (VECTOR_MODE_P (tmode));
15219 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
15220 {
15221 rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15222 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15223 }
15225 gcc_assert (arglist == NULL);
15227 if (!target || !register_operand (target, tmode))
15228 target = gen_reg_rtx (tmode);
15230 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
15231 return target;
15232 }
15234 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15235 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
15236 had a language-level syntax for referencing vector elements. */
15238 static rtx
15239 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
15240 {
15241 enum machine_mode tmode, mode0;
15242 tree arg0, arg1;
15243 int elt;
15244 rtx op0;
15246 arg0 = TREE_VALUE (arglist);
15247 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15249 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15250 elt = get_element_number (TREE_TYPE (arg0), arg1);
15252 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15253 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15254 gcc_assert (VECTOR_MODE_P (mode0));
15256 op0 = force_reg (mode0, op0);
15258 if (optimize || !target || !register_operand (target, tmode))
15259 target = gen_reg_rtx (tmode);
15261 ix86_expand_vector_extract (true, target, op0, elt);
15263 return target;
15264 }
15266 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15267 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
15268 a language-level syntax for referencing vector elements. */
15270 static rtx
15271 ix86_expand_vec_set_builtin (tree arglist)
15272 {
15273 enum machine_mode tmode, mode1;
15274 tree arg0, arg1, arg2;
15275 int elt;
15276 rtx op0, op1;
15278 arg0 = TREE_VALUE (arglist);
15279 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15280 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15282 tmode = TYPE_MODE (TREE_TYPE (arg0));
15283 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15284 gcc_assert (VECTOR_MODE_P (tmode));
15286 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
15287 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
15288 elt = get_element_number (TREE_TYPE (arg0), arg2);
15290 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15291 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15293 op0 = force_reg (tmode, op0);
15294 op1 = force_reg (mode1, op1);
15296 ix86_expand_vector_set (true, op0, op1, elt);
15298 return op0;
15299 }
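/* Added illustration: a hypothetical mmintrin.h-style wrapper over the
   vec_set builtin (names illustrative only):

     #define _mm_insert_pi16(A, D, N) \
       ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N)))
*/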
15301 /* Expand an expression EXP that calls a built-in function,
15302 with result going to TARGET if that's convenient
15303 (and in mode MODE if that's convenient).
15304 SUBTARGET may be used as the target for computing one of EXP's operands.
15305 IGNORE is nonzero if the value is to be ignored. */
15307 static rtx
15308 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15309 enum machine_mode mode ATTRIBUTE_UNUSED,
15310 int ignore ATTRIBUTE_UNUSED)
15311 {
15312 const struct builtin_description *d;
15313 size_t i;
15314 enum insn_code icode;
15315 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
15316 tree arglist = TREE_OPERAND (exp, 1);
15317 tree arg0, arg1, arg2;
15318 rtx op0, op1, op2, pat;
15319 enum machine_mode tmode, mode0, mode1, mode2;
15320 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15322 switch (fcode)
15323 {
15324 case IX86_BUILTIN_EMMS:
15325 emit_insn (gen_mmx_emms ());
15326 return 0;
15328 case IX86_BUILTIN_SFENCE:
15329 emit_insn (gen_sse_sfence ());
15330 return 0;
15332 case IX86_BUILTIN_MASKMOVQ:
15333 case IX86_BUILTIN_MASKMOVDQU:
15334 icode = (fcode == IX86_BUILTIN_MASKMOVQ
15335 ? CODE_FOR_mmx_maskmovq
15336 : CODE_FOR_sse2_maskmovdqu);
15337 /* Note the arg order is different from the operand order. */
15338 arg1 = TREE_VALUE (arglist);
15339 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
15340 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15341 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15342 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15343 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15344 mode0 = insn_data[icode].operand[0].mode;
15345 mode1 = insn_data[icode].operand[1].mode;
15346 mode2 = insn_data[icode].operand[2].mode;
15348 op0 = force_reg (Pmode, op0);
15349 op0 = gen_rtx_MEM (mode1, op0);
15351 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15352 op0 = copy_to_mode_reg (mode0, op0);
15353 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15354 op1 = copy_to_mode_reg (mode1, op1);
15355 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
15356 op2 = copy_to_mode_reg (mode2, op2);
15357 pat = GEN_FCN (icode) (op0, op1, op2);
15358 if (! pat)
15359 return 0;
15360 emit_insn (pat);
15361 return 0;
15363 case IX86_BUILTIN_SQRTSS:
15364 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
15365 case IX86_BUILTIN_RSQRTSS:
15366 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
15367 case IX86_BUILTIN_RCPSS:
15368 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
15370 case IX86_BUILTIN_LOADUPS:
15371 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
15373 case IX86_BUILTIN_STOREUPS:
15374 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
15376 case IX86_BUILTIN_LOADHPS:
15377 case IX86_BUILTIN_LOADLPS:
15378 case IX86_BUILTIN_LOADHPD:
15379 case IX86_BUILTIN_LOADLPD:
15380 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
15381 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
15382 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
15383 : CODE_FOR_sse2_loadlpd);
15384 arg0 = TREE_VALUE (arglist);
15385 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15386 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15387 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15388 tmode = insn_data[icode].operand[0].mode;
15389 mode0 = insn_data[icode].operand[1].mode;
15390 mode1 = insn_data[icode].operand[2].mode;
15392 op0 = force_reg (mode0, op0);
15393 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
15394 if (optimize || target == 0
15395 || GET_MODE (target) != tmode
15396 || !register_operand (target, tmode))
15397 target = gen_reg_rtx (tmode);
15398 pat = GEN_FCN (icode) (target, op0, op1);
15399 if (! pat)
15400 return 0;
15401 emit_insn (pat);
15402 return target;
15404 case IX86_BUILTIN_STOREHPS:
15405 case IX86_BUILTIN_STORELPS:
15406 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
15407 : CODE_FOR_sse_storelps);
15408 arg0 = TREE_VALUE (arglist);
15409 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15410 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15411 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15412 mode0 = insn_data[icode].operand[0].mode;
15413 mode1 = insn_data[icode].operand[1].mode;
15415 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15416 op1 = force_reg (mode1, op1);
15418 pat = GEN_FCN (icode) (op0, op1);
15419 if (! pat)
15420 return 0;
15421 emit_insn (pat);
15422 return const0_rtx;
15424 case IX86_BUILTIN_MOVNTPS:
15425 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
15426 case IX86_BUILTIN_MOVNTQ:
15427 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
15429 case IX86_BUILTIN_LDMXCSR:
15430 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15431 target = assign_386_stack_local (SImode, SLOT_TEMP);
15432 emit_move_insn (target, op0);
15433 emit_insn (gen_sse_ldmxcsr (target));
15434 return 0;
15436 case IX86_BUILTIN_STMXCSR:
15437 target = assign_386_stack_local (SImode, SLOT_TEMP);
15438 emit_insn (gen_sse_stmxcsr (target));
15439 return copy_to_mode_reg (SImode, target);
15441 case IX86_BUILTIN_SHUFPS:
15442 case IX86_BUILTIN_SHUFPD:
15443 icode = (fcode == IX86_BUILTIN_SHUFPS
15444 ? CODE_FOR_sse_shufps
15445 : CODE_FOR_sse2_shufpd);
15446 arg0 = TREE_VALUE (arglist);
15447 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15448 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15449 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15450 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15451 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15452 tmode = insn_data[icode].operand[0].mode;
15453 mode0 = insn_data[icode].operand[1].mode;
15454 mode1 = insn_data[icode].operand[2].mode;
15455 mode2 = insn_data[icode].operand[3].mode;
15457 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15458 op0 = copy_to_mode_reg (mode0, op0);
15459 if ((optimize && !register_operand (op1, mode1))
15460 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
15461 op1 = copy_to_mode_reg (mode1, op1);
15462 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15463 {
15464 /* @@@ better error message */
15465 error ("mask must be an immediate");
15466 return gen_reg_rtx (tmode);
15467 }
15468 if (optimize || target == 0
15469 || GET_MODE (target) != tmode
15470 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15471 target = gen_reg_rtx (tmode);
15472 pat = GEN_FCN (icode) (target, op0, op1, op2);
15473 if (! pat)
15474 return 0;
15475 emit_insn (pat);
15476 return target;
15478 case IX86_BUILTIN_PSHUFW:
15479 case IX86_BUILTIN_PSHUFD:
15480 case IX86_BUILTIN_PSHUFHW:
15481 case IX86_BUILTIN_PSHUFLW:
15482 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
15483 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
15484 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
15485 : CODE_FOR_mmx_pshufw);
15486 arg0 = TREE_VALUE (arglist);
15487 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15488 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15489 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15490 tmode = insn_data[icode].operand[0].mode;
15491 mode1 = insn_data[icode].operand[1].mode;
15492 mode2 = insn_data[icode].operand[2].mode;
15494 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15495 op0 = copy_to_mode_reg (mode1, op0);
15496 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15497 {
15498 /* @@@ better error message */
15499 error ("mask must be an immediate");
15500 return const0_rtx;
15501 }
15502 if (target == 0
15503 || GET_MODE (target) != tmode
15504 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15505 target = gen_reg_rtx (tmode);
15506 pat = GEN_FCN (icode) (target, op0, op1);
15507 if (! pat)
15508 return 0;
15509 emit_insn (pat);
15510 return target;
15512 case IX86_BUILTIN_PSLLDQI128:
15513 case IX86_BUILTIN_PSRLDQI128:
15514 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
15515 : CODE_FOR_sse2_lshrti3);
15516 arg0 = TREE_VALUE (arglist);
15517 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15518 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15519 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15520 tmode = insn_data[icode].operand[0].mode;
15521 mode1 = insn_data[icode].operand[1].mode;
15522 mode2 = insn_data[icode].operand[2].mode;
15524 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15525 {
15526 op0 = copy_to_reg (op0);
15527 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
15528 }
15529 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15530 {
15531 error ("shift must be an immediate");
15532 return const0_rtx;
15533 }
15534 target = gen_reg_rtx (V2DImode);
15535 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
15536 if (! pat)
15537 return 0;
15538 emit_insn (pat);
15539 return target;
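/* Added note: OP1 here is a bit count, so an emmintrin.h-style byte-shift
   wrapper is expected to scale it; e.g. a sketch of _mm_slli_si128 (A, N)
   would pass (N) * 8 as the immediate.  */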
15541 case IX86_BUILTIN_FEMMS:
15542 emit_insn (gen_mmx_femms ());
15543 return 0;
15545 case IX86_BUILTIN_PAVGUSB:
15546 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
15548 case IX86_BUILTIN_PF2ID:
15549 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
15551 case IX86_BUILTIN_PFACC:
15552 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
15554 case IX86_BUILTIN_PFADD:
15555 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
15557 case IX86_BUILTIN_PFCMPEQ:
15558 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
15560 case IX86_BUILTIN_PFCMPGE:
15561 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
15563 case IX86_BUILTIN_PFCMPGT:
15564 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
15566 case IX86_BUILTIN_PFMAX:
15567 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
15569 case IX86_BUILTIN_PFMIN:
15570 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
15572 case IX86_BUILTIN_PFMUL:
15573 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
15575 case IX86_BUILTIN_PFRCP:
15576 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
15578 case IX86_BUILTIN_PFRCPIT1:
15579 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
15581 case IX86_BUILTIN_PFRCPIT2:
15582 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
15584 case IX86_BUILTIN_PFRSQIT1:
15585 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
15587 case IX86_BUILTIN_PFRSQRT:
15588 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
15590 case IX86_BUILTIN_PFSUB:
15591 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
15593 case IX86_BUILTIN_PFSUBR:
15594 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
15596 case IX86_BUILTIN_PI2FD:
15597 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
15599 case IX86_BUILTIN_PMULHRW:
15600 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
15602 case IX86_BUILTIN_PF2IW:
15603 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
15605 case IX86_BUILTIN_PFNACC:
15606 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
15608 case IX86_BUILTIN_PFPNACC:
15609 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
15611 case IX86_BUILTIN_PI2FW:
15612 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
15614 case IX86_BUILTIN_PSWAPDSI:
15615 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
15617 case IX86_BUILTIN_PSWAPDSF:
15618 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
15620 case IX86_BUILTIN_SQRTSD:
15621 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
15622 case IX86_BUILTIN_LOADUPD:
15623 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
15624 case IX86_BUILTIN_STOREUPD:
15625 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
15627 case IX86_BUILTIN_MFENCE:
15628 emit_insn (gen_sse2_mfence ());
15629 return 0;
15630 case IX86_BUILTIN_LFENCE:
15631 emit_insn (gen_sse2_lfence ());
15632 return 0;
15634 case IX86_BUILTIN_CLFLUSH:
15635 arg0 = TREE_VALUE (arglist);
15636 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15637 icode = CODE_FOR_sse2_clflush;
15638 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
15639 op0 = copy_to_mode_reg (Pmode, op0);
15641 emit_insn (gen_sse2_clflush (op0));
15642 return 0;
15644 case IX86_BUILTIN_MOVNTPD:
15645 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
15646 case IX86_BUILTIN_MOVNTDQ:
15647 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
15648 case IX86_BUILTIN_MOVNTI:
15649 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
15651 case IX86_BUILTIN_LOADDQU:
15652 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
15653 case IX86_BUILTIN_STOREDQU:
15654 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
15656 case IX86_BUILTIN_MONITOR:
15657 arg0 = TREE_VALUE (arglist);
15658 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15659 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15660 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15661 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15662 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15663 if (!REG_P (op0))
15664 op0 = copy_to_mode_reg (SImode, op0);
15665 if (!REG_P (op1))
15666 op1 = copy_to_mode_reg (SImode, op1);
15667 if (!REG_P (op2))
15668 op2 = copy_to_mode_reg (SImode, op2);
15669 emit_insn (gen_sse3_monitor (op0, op1, op2));
15670 return 0;
15672 case IX86_BUILTIN_MWAIT:
15673 arg0 = TREE_VALUE (arglist);
15674 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15675 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15676 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15677 if (!REG_P (op0))
15678 op0 = copy_to_mode_reg (SImode, op0);
15679 if (!REG_P (op1))
15680 op1 = copy_to_mode_reg (SImode, op1);
15681 emit_insn (gen_sse3_mwait (op0, op1));
15682 return 0;
15684 case IX86_BUILTIN_LDDQU:
15685 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
15686 target, 1);
15688 case IX86_BUILTIN_VEC_INIT_V2SI:
15689 case IX86_BUILTIN_VEC_INIT_V4HI:
15690 case IX86_BUILTIN_VEC_INIT_V8QI:
15691 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
15693 case IX86_BUILTIN_VEC_EXT_V2DF:
15694 case IX86_BUILTIN_VEC_EXT_V2DI:
15695 case IX86_BUILTIN_VEC_EXT_V4SF:
15696 case IX86_BUILTIN_VEC_EXT_V4SI:
15697 case IX86_BUILTIN_VEC_EXT_V8HI:
15698 case IX86_BUILTIN_VEC_EXT_V2SI:
15699 case IX86_BUILTIN_VEC_EXT_V4HI:
15700 return ix86_expand_vec_ext_builtin (arglist, target);
15702 case IX86_BUILTIN_VEC_SET_V8HI:
15703 case IX86_BUILTIN_VEC_SET_V4HI:
15704 return ix86_expand_vec_set_builtin (arglist);
15706 default:
15707 break;
15708 }
15710 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15711 if (d->code == fcode)
15712 {
15713 /* Compares are treated specially.  */
15714 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
15715 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
15716 || d->icode == CODE_FOR_sse2_maskcmpv2df3
15717 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
15718 return ix86_expand_sse_compare (d, arglist, target);
15720 return ix86_expand_binop_builtin (d->icode, arglist, target);
15721 }
15723 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15724 if (d->code == fcode)
15725 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
15727 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
15728 if (d->code == fcode)
15729 return ix86_expand_sse_comi (d, arglist, target);
15731 gcc_unreachable ();
15732 }
15734 /* Store OPERAND to the memory after reload is completed. This means
15735 that we can't easily use assign_stack_local. */
15736 rtx
15737 ix86_force_to_memory (enum machine_mode mode, rtx operand)
15738 {
15739 rtx result;
15741 gcc_assert (reload_completed);
15742 if (TARGET_RED_ZONE)
15743 {
15744 result = gen_rtx_MEM (mode,
15745 gen_rtx_PLUS (Pmode,
15746 stack_pointer_rtx,
15747 GEN_INT (-RED_ZONE_SIZE)));
15748 emit_move_insn (result, operand);
15749 }
15750 else if (!TARGET_RED_ZONE && TARGET_64BIT)
15751 {
15752 switch (mode)
15753 {
15754 case HImode:
15755 case SImode:
15756 operand = gen_lowpart (DImode, operand);
15757 /* FALLTHRU */
15758 case DImode:
15759 emit_insn (
15760 gen_rtx_SET (VOIDmode,
15761 gen_rtx_MEM (DImode,
15762 gen_rtx_PRE_DEC (DImode,
15763 stack_pointer_rtx)),
15764 operand));
15765 break;
15766 default:
15767 gcc_unreachable ();
15768 }
15769 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15770 }
15771 else
15772 {
15773 switch (mode)
15774 {
15775 case DImode:
15776 {
15777 rtx operands[2];
15778 split_di (&operand, 1, operands, operands + 1);
15779 emit_insn (
15780 gen_rtx_SET (VOIDmode,
15781 gen_rtx_MEM (SImode,
15782 gen_rtx_PRE_DEC (Pmode,
15783 stack_pointer_rtx)),
15784 operands[1]));
15785 emit_insn (
15786 gen_rtx_SET (VOIDmode,
15787 gen_rtx_MEM (SImode,
15788 gen_rtx_PRE_DEC (Pmode,
15789 stack_pointer_rtx)),
15790 operands[0]));
15791 }
15792 break;
15793 case HImode:
15794 /* It is better to store HImodes as SImodes.  */
15795 if (!TARGET_PARTIAL_REG_STALL)
15796 operand = gen_lowpart (SImode, operand);
15797 /* FALLTHRU */
15798 case SImode:
15799 emit_insn (
15800 gen_rtx_SET (VOIDmode,
15801 gen_rtx_MEM (GET_MODE (operand),
15802 gen_rtx_PRE_DEC (SImode,
15803 stack_pointer_rtx)),
15804 operand));
15805 break;
15806 default:
15807 gcc_unreachable ();
15808 }
15809 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15810 }
15811 return result;
15812 }
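/* Added example (assuming the 128-byte x86_64 red zone, i.e.
   RED_ZONE_SIZE == 128): storing a DImode operand with TARGET_RED_ZONE
   emits approximately

     (set (mem:DI (plus:DI (reg:DI sp) (const_int -128))) (reg:DI op))

   using the area below the stack pointer without adjusting it.  */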
15814 /* Free operand from the memory.  */
15815 void
15816 ix86_free_from_memory (enum machine_mode mode)
15817 {
15818 if (!TARGET_RED_ZONE)
15819 {
15820 int size;
15822 if (mode == DImode || TARGET_64BIT)
15823 size = 8;
15824 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
15825 size = 2;
15826 else
15827 size = 4;
15828 /* Use LEA to deallocate stack space.  In peephole2 it will be converted
15829 to pop or add instruction if registers are available.  */
15830 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
15831 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
15832 GEN_INT (size))));
15833 }
15834 }
15836 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
15837 QImode must go into class Q_REGS.
15838 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
15839 movdf to do mem-to-mem moves through integer regs. */
15840 enum reg_class
15841 ix86_preferred_reload_class (rtx x, enum reg_class class)
15842 {
15843 /* We're only allowed to return a subclass of CLASS.  Many of the
15844 following checks fail for NO_REGS, so eliminate that early.  */
15845 if (class == NO_REGS)
15846 return NO_REGS;
15848 /* All classes can load zeros.  */
15849 if (x == CONST0_RTX (GET_MODE (x)))
15850 return class;
15852 /* Floating-point constants need more complex checks.  */
15853 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
15854 {
15855 /* General regs can load everything.  */
15856 if (reg_class_subset_p (class, GENERAL_REGS))
15857 return class;
15859 /* Floats can load 0 and 1 plus some others.  Note that we eliminated
15860 zero above.  We only want to wind up preferring 80387 registers if
15861 we plan on doing computation with them.  */
15862 if (TARGET_80387
15863 && (TARGET_MIX_SSE_I387
15864 || !(TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (x))))
15865 && standard_80387_constant_p (x))
15866 {
15867 /* Limit class to non-sse.  */
15868 if (class == FLOAT_SSE_REGS)
15869 return FLOAT_REGS;
15870 if (class == FP_TOP_SSE_REGS)
15871 return FP_TOP_REG;
15872 if (class == FP_SECOND_SSE_REGS)
15873 return FP_SECOND_REG;
15874 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
15875 return class;
15876 }
15878 return NO_REGS;
15879 }
15880 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
15881 return NO_REGS;
15882 if (MAYBE_SSE_CLASS_P (class) && CONSTANT_P (x))
15883 return NO_REGS;
15885 /* Generally when we see PLUS here, it's the function invariant
15886 (plus soft-fp const_int), which can only be computed into general
15887 regs.  */
15888 if (GET_CODE (x) == PLUS)
15889 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
15891 /* QImode constants are easy to load, but non-constant QImode data
15892 must go into Q_REGS.  */
15893 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
15894 {
15895 if (reg_class_subset_p (class, Q_REGS))
15896 return class;
15897 if (reg_class_subset_p (Q_REGS, class))
15898 return Q_REGS;
15899 return NO_REGS;
15900 }
15902 return class;
15903 }
15905 /* If we are copying between general and FP registers, we need a memory
15906 location. The same is true for SSE and MMX registers.
15908 The macro can't work reliably when one of the CLASSES is class containing
15909 registers from multiple units (SSE, MMX, integer). We avoid this by never
15910 combining those units in single alternative in the machine description.
15911 Ensure that this constraint holds to avoid unexpected surprises.
15913 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
15914 enforce these sanity checks. */
15916 bool
15917 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
15918 enum machine_mode mode, int strict)
15919 {
15920 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
15921 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
15922 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
15923 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
15924 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
15925 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
15926 {
15927 gcc_assert (!strict);
15928 return true;
15929 }
15931 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
15932 return true;
15934 /* ??? This is a lie.  We do have moves between mmx/general, and for
15935 mmx/sse2.  But by saying we need secondary memory we discourage the
15936 register allocator from using the mmx registers unless needed.  */
15937 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
15938 return true;
15940 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
15941 {
15942 /* SSE1 doesn't have any direct moves from other classes.  */
15943 if (!TARGET_SSE2)
15944 return true;
15946 /* If the target says that inter-unit moves are more expensive
15947 than moving through memory, then don't generate them. */
15948 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
15949 return true;
15951 /* Between SSE and general, we have moves no larger than word size. */
15952 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
15953 return true;
15955 /* ??? For the cost of one register reformat penalty, we could use
15956 the same instructions to move SFmode and DFmode data, but the
15957 relevant move patterns don't support those alternatives. */
15958 if (mode == SFmode || mode == DFmode)
15959 return true;
15960 }
15962 return false;
15963 }
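/* Added example: with only SSE1 enabled, a DFmode move between SSE_REGS
   and GENERAL_REGS reports true here, so reload routes the copy through a
   stack slot; SSE2 plus TARGET_INTER_UNIT_MOVES can instead permit direct
   movd-style copies for word-size modes.  */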
15965 /* Return true if the registers in CLASS cannot represent the change from
15966 modes FROM to TO. */
15968 bool
15969 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
15970 enum reg_class class)
15971 {
15972 if (from == to)
15973 return false;
15975 /* x87 registers can't do subreg at all, as all values are reformatted
15976 to extended precision. */
15977 if (MAYBE_FLOAT_CLASS_P (class))
15978 return true;
15980 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
15981 {
15982 /* Vector registers do not support QI or HImode loads. If we don't
15983 disallow a change to these modes, reload will assume it's ok to
15984 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
15985 the vec_dupv4hi pattern. */
15986 if (GET_MODE_SIZE (from) < 4)
15987 return true;
15989 /* Vector registers do not support subreg with nonzero offsets, which
15990 are otherwise valid for integer registers. Since we can't see
15991 whether we have a nonzero offset from here, prohibit all
15992 nonparadoxical subregs changing size. */
15993 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
15994 return true;
15995 }
15997 return false;
15998 }
16000 /* Return the cost of moving data from a register in class CLASS1 to
16001 one in class CLASS2.
16003 It is not required that the cost always equal 2 when FROM is the same as TO;
16004 on some machines it is expensive to move between registers if they are not
16005 general registers. */
16007 int
16008 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
16009 enum reg_class class2)
16010 {
16011 /* In case we require secondary memory, compute cost of the store followed
16012 by load. In order to avoid bad register allocation choices, we need
16013 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
16015 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
16016 {
16017 int cost = 1;
16019 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
16020 MEMORY_MOVE_COST (mode, class1, 1));
16021 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
16022 MEMORY_MOVE_COST (mode, class2, 1));
16024 /* In case of copying from general_purpose_register we may emit multiple
16025 stores followed by single load causing memory size mismatch stall.
16026 Count this as arbitrarily high cost of 20. */
16027 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
16028 cost += 20;
16030 /* In the case of FP/MMX moves, the registers actually overlap, and we
16031 have to switch modes in order to treat them differently. */
16032 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
16033 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
16034 cost += 20;
16036 return cost;
16037 }
16039 /* Moves between SSE/MMX and integer unit are expensive. */
16040 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
16041 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
16042 return ix86_cost->mmxsse_to_integer;
16043 if (MAYBE_FLOAT_CLASS_P (class1))
16044 return ix86_cost->fp_move;
16045 if (MAYBE_SSE_CLASS_P (class1))
16046 return ix86_cost->sse_move;
16047 if (MAYBE_MMX_CLASS_P (class1))
16048 return ix86_cost->mmx_move;
16049 return 2;
16050 }
16052 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
16054 int
16055 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
16056 {
16057 /* Flags and only flags can only hold CCmode values.  */
16058 if (CC_REGNO_P (regno))
16059 return GET_MODE_CLASS (mode) == MODE_CC;
16060 if (GET_MODE_CLASS (mode) == MODE_CC
16061 || GET_MODE_CLASS (mode) == MODE_RANDOM
16062 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
16063 return 0;
16064 if (FP_REGNO_P (regno))
16065 return VALID_FP_MODE_P (mode);
16066 if (SSE_REGNO_P (regno))
16067 {
16068 /* We implement the move patterns for all vector modes into and
16069 out of SSE registers, even when no operation instructions
16070 exist.  */
16071 return (VALID_SSE_REG_MODE (mode)
16072 || VALID_SSE2_REG_MODE (mode)
16073 || VALID_MMX_REG_MODE (mode)
16074 || VALID_MMX_REG_MODE_3DNOW (mode));
16075 }
16076 if (MMX_REGNO_P (regno))
16077 {
16078 /* We implement the move patterns for 3DNOW modes even in MMX mode,
16079 so if the register is available at all, then we can move data of
16080 the given mode into or out of it.  */
16081 return (VALID_MMX_REG_MODE (mode)
16082 || VALID_MMX_REG_MODE_3DNOW (mode));
16083 }
16085 if (mode == QImode)
16086 {
16087 /* Take care for QImode values - they can be in non-QI regs,
16088 but then they do cause partial register stalls.  */
16089 if (regno < 4 || TARGET_64BIT)
16090 return 1;
16091 if (!TARGET_PARTIAL_REG_STALL)
16092 return 1;
16093 return reload_in_progress || reload_completed;
16094 }
16095 /* We handle both integer and floats in the general purpose registers.  */
16096 else if (VALID_INT_MODE_P (mode))
16097 return 1;
16098 else if (VALID_FP_MODE_P (mode))
16099 return 1;
16100 /* Lots of MMX code casts 8 byte vector modes to DImode.  If we then go
16101 on to use that value in smaller contexts, this can easily force a
16102 pseudo to be allocated to GENERAL_REGS.  Since this is no worse than
16103 supporting DImode, allow it.  */
16104 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
16105 return 1;
16107 return 0;
16108 }
16110 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
16111 tieable integer mode. */
16113 static bool
16114 ix86_tieable_integer_mode_p (enum machine_mode mode)
16115 {
16116 switch (mode)
16117 {
16118 case HImode:
16119 case SImode:
16120 return true;
16122 case QImode:
16123 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
16125 case DImode:
16126 return TARGET_64BIT;
16128 default:
16129 return false;
16130 }
16131 }
16133 /* Return true if MODE1 is accessible in a register that can hold MODE2
16134 without copying. That is, all register classes that can hold MODE2
16135 can also hold MODE1. */
16137 static bool
16138 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
16139 {
16140 if (mode1 == mode2)
16141 return true;
16143 if (ix86_tieable_integer_mode_p (mode1)
16144 && ix86_tieable_integer_mode_p (mode2))
16145 return true;
16147 /* MODE2 being XFmode implies fp stack or general regs, which means we
16148 can tie any smaller floating point modes to it. Note that we do not
16149 tie this with TFmode. */
16150 if (mode2 == XFmode)
16151 return mode1 == SFmode || mode1 == DFmode;
16153 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
16154 that we can tie it with SFmode. */
16155 if (mode2 == DFmode)
16156 return mode1 == SFmode;
16158 /* If MODE2 is only appropriate for an SSE register, then tie with
16159 any other mode acceptable to SSE registers. */
16160 if (GET_MODE_SIZE (mode2) >= 8
16161 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
16162 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
16164 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
16165 with any other mode acceptable to MMX registers. */
16166 if (GET_MODE_SIZE (mode2) == 8
16167 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
16168 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
16170 return false;
16171 }
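/* Added example: ix86_modes_tieable_p (SFmode, DFmode) is true -- every
   class that can hold DFmode (x87, general and SSE registers) can also
   hold SFmode -- whereas the reverse query falls through all of the checks
   above and returns false.  */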
16173 /* Return the cost of moving data of mode M between a
16174 register and memory.  A value of 2 is the default; this cost is
16175 relative to those in `REGISTER_MOVE_COST'.
16177 If moving between registers and memory is more expensive than
16178 between two registers, you should define this macro to express the
16179 relative cost.
16181 Model also increased moving costs of QImode registers in non
16182 Q_REGS classes.  */
16184 int
16185 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
16186 {
16187 if (FLOAT_CLASS_P (class))
16188 {
16189 int index;
16190 switch (mode)
16191 {
16192 case SFmode:
16193 index = 0;
16194 break;
16195 case DFmode:
16196 index = 1;
16197 break;
16198 case XFmode:
16199 index = 2;
16200 break;
16201 default:
16202 return 100;
16203 }
16204 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
16205 }
16206 if (SSE_CLASS_P (class))
16207 {
16208 int index;
16209 switch (GET_MODE_SIZE (mode))
16210 {
16211 case 4:
16212 index = 0;
16213 break;
16214 case 8:
16215 index = 1;
16216 break;
16217 case 16:
16218 index = 2;
16219 break;
16220 default:
16221 return 100;
16222 }
16223 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
16224 }
16225 if (MMX_CLASS_P (class))
16226 {
16227 int index;
16228 switch (GET_MODE_SIZE (mode))
16229 {
16230 case 4:
16231 index = 0;
16232 break;
16233 case 8:
16234 index = 1;
16235 break;
16236 default:
16237 return 100;
16238 }
16239 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
16240 }
16241 switch (GET_MODE_SIZE (mode))
16242 {
16243 case 1:
16244 if (in)
16245 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
16246 : ix86_cost->movzbl_load);
16247 else
16248 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
16249 : ix86_cost->int_store[0] + 4);
16250 break;
16251 case 2:
16252 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
16253 default:
16254 /* Compute number of 32bit moves needed.  TFmode is moved as XFmode.  */
16255 if (mode == TFmode)
16256 mode = XFmode;
16257 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
16258 * (((int) GET_MODE_SIZE (mode)
16259 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
16260 }
16261 }
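/* Added worked example: for DImode in GENERAL_REGS on ia32
   (UNITS_PER_WORD == 4), GET_MODE_SIZE is 8 and the default case above
   charges int_load[2] * ((8 + 4 - 1) / 4) = int_load[2] * 2, i.e. two
   32-bit memory moves.  */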
16263 /* Compute a (partial) cost for rtx X. Return true if the complete
16264 cost has been computed, and false if subexpressions should be
16265 scanned. In either case, *TOTAL contains the cost result. */
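/* Added worked example for the function below: costing
   (mult:SI (reg:SI) (const_int 5)) reaches the MULT case, which counts
   nbits = 2 (two bits set in 5) and reports
   COSTS_N_INSNS (mult_init[2] + 2 * mult_bit) plus the operand costs,
   returning true so the subexpressions are not scanned again.  */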
16267 static bool
16268 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
16269 {
16270 enum machine_mode mode = GET_MODE (x);
16272 switch (code)
16273 {
16274 case CONST_INT:
16275 case CONST:
16276 case LABEL_REF:
16277 case SYMBOL_REF:
16278 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
16279 *total = 3;
16280 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
16281 *total = 2;
16282 else if (flag_pic && SYMBOLIC_CONST (x)
16283 && (!TARGET_64BIT
16284 || (GET_CODE (x) != LABEL_REF
16285 && (GET_CODE (x) != SYMBOL_REF
16286 || !SYMBOL_REF_LOCAL_P (x)))))
16287 *total = 1;
16288 else
16289 *total = 0;
16290 return true;
16292 case CONST_DOUBLE:
16293 if (mode == VOIDmode)
16294 *total = 0;
16295 else
16296 switch (standard_80387_constant_p (x))
16297 {
16298 case 1: /* 0.0 */
16299 *total = 1;
16300 break;
16301 default: /* Other constants */
16302 *total = 2;
16303 break;
16304 case 0:
16305 case -1:
16306 /* Start with (MEM (SYMBOL_REF)), since that's where
16307 it'll probably end up.  Add a penalty for size.  */
16308 *total = (COSTS_N_INSNS (1)
16309 + (flag_pic != 0 && !TARGET_64BIT)
16310 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
16311 break;
16312 }
16313 break;
16315 case ZERO_EXTEND:
16316 /* The zero extension is often completely free on x86_64, so make
16317 it as cheap as possible.  */
16318 if (TARGET_64BIT && mode == DImode
16319 && GET_MODE (XEXP (x, 0)) == SImode)
16320 *total = 1;
16321 else if (TARGET_ZERO_EXTEND_WITH_AND)
16322 *total = COSTS_N_INSNS (ix86_cost->add);
16323 else
16324 *total = COSTS_N_INSNS (ix86_cost->movzx);
16325 break;
16327 case SIGN_EXTEND:
16328 *total = COSTS_N_INSNS (ix86_cost->movsx);
16329 break;
16331 case ASHIFT:
16332 if (GET_CODE (XEXP (x, 1)) == CONST_INT
16333 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
16334 {
16335 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16336 if (value == 1)
16337 {
16338 *total = COSTS_N_INSNS (ix86_cost->add);
16339 return false;
16340 }
16341 if ((value == 2 || value == 3)
16342 && ix86_cost->lea <= ix86_cost->shift_const)
16343 {
16344 *total = COSTS_N_INSNS (ix86_cost->lea);
16345 return false;
16346 }
16347 }
16348 /* FALLTHRU */
16350 case ROTATE:
16351 case ASHIFTRT:
16352 case LSHIFTRT:
16353 case ROTATERT:
16354 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
16355 {
16356 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16357 {
16358 if (INTVAL (XEXP (x, 1)) > 32)
16359 *total = COSTS_N_INSNS (ix86_cost->shift_const + 2);
16360 else
16361 *total = COSTS_N_INSNS (ix86_cost->shift_const * 2);
16362 }
16363 else
16364 {
16365 if (GET_CODE (XEXP (x, 1)) == AND)
16366 *total = COSTS_N_INSNS (ix86_cost->shift_var * 2);
16367 else
16368 *total = COSTS_N_INSNS (ix86_cost->shift_var * 6 + 2);
16369 }
16370 }
16371 else
16372 {
16373 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16374 *total = COSTS_N_INSNS (ix86_cost->shift_const);
16375 else
16376 *total = COSTS_N_INSNS (ix86_cost->shift_var);
16377 }
16378 break;
16380 case MULT:
16381 if (FLOAT_MODE_P (mode))
16382 {
16383 *total = COSTS_N_INSNS (ix86_cost->fmul);
16384 return false;
16385 }
16386 else
16387 {
16388 rtx op0 = XEXP (x, 0);
16389 rtx op1 = XEXP (x, 1);
16390 int nbits;
16391 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16392 {
16393 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16394 for (nbits = 0; value != 0; value &= value - 1)
16395 nbits++;
16396 }
16397 else
16398 /* This is arbitrary.  */
16399 nbits = 7;
16401 /* Compute costs correctly for widening multiplication.  */
16402 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
16403 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
16404 == GET_MODE_SIZE (mode))
16405 {
16406 int is_mulwiden = 0;
16407 enum machine_mode inner_mode = GET_MODE (op0);
16409 if (GET_CODE (op0) == GET_CODE (op1))
16410 is_mulwiden = 1, op1 = XEXP (op1, 0);
16411 else if (GET_CODE (op1) == CONST_INT)
16412 {
16413 if (GET_CODE (op0) == SIGN_EXTEND)
16414 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
16415 == INTVAL (op1);
16416 else
16417 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
16418 }
16420 if (is_mulwiden)
16421 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
16422 }
16424 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
16425 + nbits * ix86_cost->mult_bit)
16426 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
16428 return true;
16429 }
16431 case DIV:
16432 case UDIV:
16433 case MOD:
16434 case UMOD:
16435 if (FLOAT_MODE_P (mode))
16436 *total = COSTS_N_INSNS (ix86_cost->fdiv);
16437 else
16438 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
16439 return false;
16441 case PLUS:
16442 if (FLOAT_MODE_P (mode))
16443 *total = COSTS_N_INSNS (ix86_cost->fadd);
16444 else if (GET_MODE_CLASS (mode) == MODE_INT
16445 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
16446 {
16447 if (GET_CODE (XEXP (x, 0)) == PLUS
16448 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
16449 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
16450 && CONSTANT_P (XEXP (x, 1)))
16451 {
16452 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
16453 if (val == 2 || val == 4 || val == 8)
16454 {
16455 *total = COSTS_N_INSNS (ix86_cost->lea);
16456 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16457 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
16458 outer_code);
16459 *total += rtx_cost (XEXP (x, 1), outer_code);
16460 return true;
16461 }
16462 }
16463 else if (GET_CODE (XEXP (x, 0)) == MULT
16464 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
16465 {
16466 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
16467 if (val == 2 || val == 4 || val == 8)
16468 {
16469 *total = COSTS_N_INSNS (ix86_cost->lea);
16470 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16471 *total += rtx_cost (XEXP (x, 1), outer_code);
16472 return true;
16473 }
16474 }
16475 else if (GET_CODE (XEXP (x, 0)) == PLUS)
16476 {
16477 *total = COSTS_N_INSNS (ix86_cost->lea);
16478 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16479 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16480 *total += rtx_cost (XEXP (x, 1), outer_code);
16481 return true;
16482 }
16483 }
16484 /* FALLTHRU */
16486 case MINUS:
16487 if (FLOAT_MODE_P (mode))
16488 {
16489 *total = COSTS_N_INSNS (ix86_cost->fadd);
16490 return false;
16491 }
16492 /* FALLTHRU */
16494 case AND:
16495 case IOR:
16496 case XOR:
16497 if (!TARGET_64BIT && mode == DImode)
16498 {
16499 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
16500 + (rtx_cost (XEXP (x, 0), outer_code)
16501 << (GET_MODE (XEXP (x, 0)) != DImode))
16502 + (rtx_cost (XEXP (x, 1), outer_code)
16503 << (GET_MODE (XEXP (x, 1)) != DImode)));
16504 return true;
16505 }
16506 /* FALLTHRU */
16508 case NEG:
16509 if (FLOAT_MODE_P (mode))
16510 {
16511 *total = COSTS_N_INSNS (ix86_cost->fchs);
16512 return false;
16513 }
16514 /* FALLTHRU */
16516 case NOT:
16517 if (!TARGET_64BIT && mode == DImode)
16518 *total = COSTS_N_INSNS (ix86_cost->add * 2);
16520 *total = COSTS_N_INSNS (ix86_cost->add);
16521 break;
16523 case COMPARE:
16524 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
16525 && XEXP (XEXP (x, 0), 1) == const1_rtx
16526 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
16527 && XEXP (x, 1) == const0_rtx)
16528 {
16529 /* This kind of construct is implemented using test[bwl].
16530 Treat it as if we had an AND. */
16531 *total = (COSTS_N_INSNS (ix86_cost->add)
16532 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
16533 + rtx_cost (const1_rtx, outer_code));
16534 return true;
16535 }
16536 return false;
16538 case FLOAT_EXTEND:
16539 if (!TARGET_SSE_MATH
16540 || mode == XFmode
16541 || (mode == DFmode && !TARGET_SSE2))
16542 *total = 0;
16543 return false;
16545 case ABS:
16546 if (FLOAT_MODE_P (mode))
16547 *total = COSTS_N_INSNS (ix86_cost->fabs);
16548 return false;
16550 case SQRT:
16551 if (FLOAT_MODE_P (mode))
16552 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
16553 return false;
16555 case UNSPEC:
16556 if (XINT (x, 1) == UNSPEC_TP)
16557 *total = 0;
16558 return false;
16560 default:
16561 return false;
16562 }
16563 }
16565 #if TARGET_MACHO
16567 static int current_machopic_label_num;
16569 /* Given a symbol name and its associated stub, write out the
16570 definition of the stub. */
16572 void
16573 machopic_output_stub (FILE *file, const char *symb, const char *stub)
16574 {
16575 unsigned int length;
16576 char *binder_name, *symbol_name, lazy_ptr_name[32];
16577 int label = ++current_machopic_label_num;
16579 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
16580 symb = (*targetm.strip_name_encoding) (symb);
16582 length = strlen (stub);
16583 binder_name = alloca (length + 32);
16584 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
16586 length = strlen (symb);
16587 symbol_name = alloca (length + 32);
16588 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
16590 sprintf (lazy_ptr_name, "L%d$lz", label);
16592 if (MACHOPIC_PURE)
16593 machopic_picsymbol_stub_section ();
16594 else
16595 machopic_symbol_stub_section ();
16597 fprintf (file, "%s:\n", stub);
16598 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16602 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
16603 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
16604 fprintf (file, "\tjmp %%edx\n");
16607 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
16609 fprintf (file, "%s:\n", binder_name);
16613 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
16614 fprintf (file, "\tpushl %%eax\n");
16617 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
16619 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
16621 machopic_lazy_symbol_ptr_section ();
16622 fprintf (file, "%s:\n", lazy_ptr_name);
16623 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16624 fprintf (file, "\t.long %s\n", binder_name);
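/* Rough shape of the non-PIC output above for a symbol "_foo" (the exact
label spellings come from GEN_BINDER_NAME_FOR_STUB and
GEN_SYMBOL_NAME_FOR_SYMBOL, so they are only indicative here):

L_foo$stub:
.indirect_symbol _foo
jmp *L1$lz
<binder>:
pushl $L1$lz
jmp dyld_stub_binding_helper
L1$lz:
.indirect_symbol _foo
.long <binder>

The PIC flavor instead materializes the pic base with a call/pop pair and
jumps through a register. */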
16626 #endif /* TARGET_MACHO */
16628 /* Order the registers for the register allocator. */
16631 x86_order_regs_for_local_alloc (void)
16636 /* First allocate the local general purpose registers. */
16637 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16638 if (GENERAL_REGNO_P (i) && call_used_regs[i])
16639 reg_alloc_order [pos++] = i;
16641 /* Global general purpose registers. */
16642 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16643 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
16644 reg_alloc_order [pos++] = i;
16646 /* x87 registers come first in case we are doing FP math using them. */
16648 if (!TARGET_SSE_MATH)
16649 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16650 reg_alloc_order [pos++] = i;
16652 /* SSE registers. */
16653 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
16654 reg_alloc_order [pos++] = i;
16655 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
16656 reg_alloc_order [pos++] = i;
16658 /* x87 registers. */
16659 if (TARGET_SSE_MATH)
16660 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16661 reg_alloc_order [pos++] = i;
16663 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
16664 reg_alloc_order [pos++] = i;
16666 /* Initialize the rest of the array, as we do not allocate some registers at all. */
16668 while (pos < FIRST_PSEUDO_REGISTER)
16669 reg_alloc_order [pos++] = 0;
16672 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
16673 struct attribute_spec.handler. */
16675 ix86_handle_struct_attribute (tree *node, tree name,
16676 tree args ATTRIBUTE_UNUSED,
16677 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
16680 if (DECL_P (*node))
16682 if (TREE_CODE (*node) == TYPE_DECL)
16683 type = &TREE_TYPE (*node);
16688 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
16689 || TREE_CODE (*type) == UNION_TYPE)))
16691 warning (OPT_Wattributes, "%qs attribute ignored",
16692 IDENTIFIER_POINTER (name));
16693 *no_add_attrs = true;
16696 else if ((is_attribute_p ("ms_struct", name)
16697 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
16698 || ((is_attribute_p ("gcc_struct", name)
16699 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
16701 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
16702 IDENTIFIER_POINTER (name));
16703 *no_add_attrs = true;
16710 ix86_ms_bitfield_layout_p (tree record_type)
16712 return (TARGET_MS_BITFIELD_LAYOUT &&
16713 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
16714 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
16717 /* Returns an expression indicating where the this parameter is
16718 located on entry to the FUNCTION. */
16721 x86_this_parameter (tree function)
16723 tree type = TREE_TYPE (function);
16727 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
16728 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
16731 if (ix86_function_regparm (type, function) > 0)
16735 parm = TYPE_ARG_TYPES (type);
16736 /* Figure out whether or not the function has a variable number of arguments. */
16738 for (; parm; parm = TREE_CHAIN (parm))
16739 if (TREE_VALUE (parm) == void_type_node)
16741 /* If not, the this parameter is in the first argument. */
16745 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
16747 return gen_rtx_REG (SImode, regno);
16751 if (aggregate_value_p (TREE_TYPE (type), type))
16752 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
16754 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
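/* The offsets above follow the 32-bit stack layout at function entry:
0(%esp) holds the return address, so the first stack argument sits at
4(%esp). When the function returns an aggregate in memory, the caller
pushes a hidden pointer to the return slot as the first argument, which
moves "this" up to 8(%esp). */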
16757 /* Determine whether x86_output_mi_thunk can succeed. */
16760 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
16761 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
16762 HOST_WIDE_INT vcall_offset, tree function)
16764 /* 64-bit can handle anything. */
16768 /* For 32-bit, everything's fine if we have one free register. */
16769 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
16772 /* Need a free register for vcall_offset. */
16776 /* Need a free register for GOT references. */
16777 if (flag_pic && !(*targetm.binds_local_p) (function))
16780 /* Otherwise ok. */
16784 /* Output the assembler code for a thunk function. THUNK_DECL is the
16785 declaration for the thunk function itself, FUNCTION is the decl for
16786 the target function. DELTA is an immediate constant offset to be
16787 added to THIS. If VCALL_OFFSET is nonzero, the word at
16788 *(*this + vcall_offset) should be added to THIS. */
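/* Illustrative sketch only: for a 32-bit thunk with DELTA == -4,
VCALL_OFFSET == -8 and THIS passed on the stack, the code emitted below
amounts to roughly

movl 4(%esp), %eax # load THIS
addl $-4, %eax # apply DELTA
movl (%eax), %ecx # load *THIS, the vtable pointer
addl -8(%ecx), %eax # add *(*THIS + VCALL_OFFSET)
movl %eax, 4(%esp) # store the adjusted THIS back
jmp function # tail call the real target

where the register choices depend on the calling convention in use. */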
16791 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
16792 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
16793 HOST_WIDE_INT vcall_offset, tree function)
16796 rtx this = x86_this_parameter (function);
16799 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
16800 pull it in now and let DELTA benefit. */
16803 else if (vcall_offset)
16805 /* Put the this parameter into %eax. */
16807 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
16808 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16811 this_reg = NULL_RTX;
16813 /* Adjust the this parameter by a fixed constant. */
16816 xops[0] = GEN_INT (delta);
16817 xops[1] = this_reg ? this_reg : this;
16820 if (!x86_64_general_operand (xops[0], DImode))
16822 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16824 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
16828 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16831 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16834 /* Adjust the this parameter by a value stored in the vtable. */
16838 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16841 int tmp_regno = 2 /* ECX */;
16842 if (lookup_attribute ("fastcall",
16843 TYPE_ATTRIBUTES (TREE_TYPE (function))))
16844 tmp_regno = 0 /* EAX */;
16845 tmp = gen_rtx_REG (SImode, tmp_regno);
16848 xops[0] = gen_rtx_MEM (Pmode, this_reg);
16851 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16853 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16855 /* Adjust the this parameter. */
16856 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
16857 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
16859 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
16860 xops[0] = GEN_INT (vcall_offset);
16862 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16863 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
16865 xops[1] = this_reg;
16867 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16869 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16872 /* If necessary, drop THIS back to its stack slot. */
16873 if (this_reg && this_reg != this)
16875 xops[0] = this_reg;
16877 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16880 xops[0] = XEXP (DECL_RTL (function), 0);
16883 if (!flag_pic || (*targetm.binds_local_p) (function))
16884 output_asm_insn ("jmp\t%P0", xops);
16887 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
16888 tmp = gen_rtx_CONST (Pmode, tmp);
16889 tmp = gen_rtx_MEM (QImode, tmp);
16891 output_asm_insn ("jmp\t%A0", xops);
16896 if (!flag_pic || (*targetm.binds_local_p) (function))
16897 output_asm_insn ("jmp\t%P0", xops);
16902 rtx sym_ref = XEXP (DECL_RTL (function), 0);
16903 tmp = (gen_rtx_SYMBOL_REF
16905 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
16906 tmp = gen_rtx_MEM (QImode, tmp);
16908 output_asm_insn ("jmp\t%0", xops);
16911 #endif /* TARGET_MACHO */
16913 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
16914 output_set_got (tmp);
16917 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
16918 output_asm_insn ("jmp\t{*}%1", xops);
16924 x86_file_start (void)
16926 default_file_start ();
16927 if (X86_FILE_START_VERSION_DIRECTIVE)
16928 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
16929 if (X86_FILE_START_FLTUSED)
16930 fputs ("\t.global\t__fltused\n", asm_out_file);
16931 if (ix86_asm_dialect == ASM_INTEL)
16932 fputs ("\t.intel_syntax\n", asm_out_file);
16936 x86_field_alignment (tree field, int computed)
16938 enum machine_mode mode;
16939 tree type = TREE_TYPE (field);
16941 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
16943 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
16944 ? get_inner_array_type (type) : type);
16945 if (mode == DFmode || mode == DCmode
16946 || GET_MODE_CLASS (mode) == MODE_INT
16947 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
16948 return MIN (32, computed);
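/* A small example of the cap above, assuming a 32-bit target without
-malign-double:

struct s { char c; double d; };

Here the double field gets 4-byte (32-bit) alignment instead of its
natural 8, matching the traditional ix86 ABI; the MIN also preserves any
smaller alignment already computed for the field. */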
16952 /* Output assembler code to FILE to increment profiler label # LABELNO
16953 for profiling a function entry. */
16955 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
16960 #ifndef NO_PROFILE_COUNTERS
16961 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
16963 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
16967 #ifndef NO_PROFILE_COUNTERS
16968 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
16970 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
16974 #ifndef NO_PROFILE_COUNTERS
16975 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
16976 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
16978 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
16982 #ifndef NO_PROFILE_COUNTERS
16983 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
16984 PROFILE_COUNT_REGISTER);
16986 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
16990 /* We don't have exact information about the insn sizes, but we may assume
16991 quite safely that we are informed about all 1-byte insns and memory
16992 address sizes. This is enough to eliminate unnecessary padding in 99% of cases. */
16996 min_insn_size (rtx insn)
17000 if (!INSN_P (insn) || !active_insn_p (insn))
17003 /* Discard alignments we've emitted, and jump instructions. */
17004 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
17005 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
17007 if (GET_CODE (insn) == JUMP_INSN
17008 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
17009 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
17012 /* Important case - calls are always 5 bytes.
17013 It is common to have many calls in a row. */
17014 if (GET_CODE (insn) == CALL_INSN
17015 && symbolic_reference_mentioned_p (PATTERN (insn))
17016 && !SIBLING_CALL_P (insn))
17018 if (get_attr_length (insn) <= 1)
17021 /* For normal instructions we may rely on the sizes of addresses
17022 and the presence of a symbol to require 4 bytes of encoding.
17023 This is not the case for jumps, where references are PC-relative. */
17024 if (GET_CODE (insn) != JUMP_INSN)
17026 l = get_attr_length_address (insn);
17027 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
17036 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16-byte window. */
17040 ix86_avoid_jump_misspredicts (void)
17042 rtx insn, start = get_insns ();
17043 int nbytes = 0, njumps = 0;
17046 /* Look for all minimal intervals of instructions containing 4 jumps.
17047 The intervals are bounded by START and INSN. NBYTES is the total
17048 size of the instructions in the interval, including INSN and not including
17049 START. When NBYTES is smaller than 16, it is possible
17050 that the end of START and INSN end up in the same 16-byte window.
17052 The smallest offset in the window at which INSN can start is the case where
17053 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
17054 We add a p2align to the 16-byte window with maxskip 15 - NBYTES + sizeof (INSN). */
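/* Arithmetic illustration of the padding below: with four jumps packed
into NBYTES == 12 and sizeof (INSN) == 2, we emit
gen_align (GEN_INT (15 - 12 + 2)), i.e. a .p2align 4 with a maximum skip
of 5 bytes, intended to push INSN out of the 16-byte window that already
holds the three earlier jumps. */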
17056 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
17059 nbytes += min_insn_size (insn);
17061 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
17062 INSN_UID (insn), min_insn_size (insn));
17063 if ((GET_CODE (insn) == JUMP_INSN
17064 && GET_CODE (PATTERN (insn)) != ADDR_VEC
17065 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
17066 || GET_CODE (insn) == CALL_INSN)
17073 start = NEXT_INSN (start);
17074 if ((GET_CODE (start) == JUMP_INSN
17075 && GET_CODE (PATTERN (start)) != ADDR_VEC
17076 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
17077 || GET_CODE (start) == CALL_INSN)
17078 njumps--, isjump = 1;
17081 nbytes -= min_insn_size (start);
17083 gcc_assert (njumps >= 0);
17085 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
17086 INSN_UID (start), INSN_UID (insn), nbytes);
17088 if (njumps == 3 && isjump && nbytes < 16)
17090 int padsize = 15 - nbytes + min_insn_size (insn);
17093 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
17094 INSN_UID (insn), padsize);
17095 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
17100 /* AMD Athlon works faster
17101 when RET is not the destination of a conditional jump or directly preceded
17102 by another jump instruction. We avoid the penalty by inserting a NOP just
17103 before the RET instructions in such cases. */
17105 ix86_pad_returns (void)
17110 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
17112 basic_block bb = e->src;
17113 rtx ret = BB_END (bb);
17115 bool replace = false;
17117 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
17118 || !maybe_hot_bb_p (bb))
17120 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
17121 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
17123 if (prev && GET_CODE (prev) == CODE_LABEL)
17128 FOR_EACH_EDGE (e, ei, bb->preds)
17129 if (EDGE_FREQUENCY (e) && e->src->index >= 0
17130 && !(e->flags & EDGE_FALLTHRU))
17135 prev = prev_active_insn (ret);
17137 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
17138 || GET_CODE (prev) == CALL_INSN))
17140 /* Empty functions get a branch mispredict even when the jump destination
17141 is not visible to us. */
17142 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
17147 emit_insn_before (gen_return_internal_long (), ret);
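/* The replacement emitted above is the "rep ; ret" idiom (assuming
return_internal_long expands to a rep-prefixed return, as on K8): the
prefix turns the return into a two-byte instruction, which sidesteps the
predictor penalty for a one-byte ret reached by a jump. */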
17153 /* Implement machine-specific optimizations. We implement padding of returns
17154 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
17158 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
17159 ix86_pad_returns ();
17160 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
17161 ix86_avoid_jump_misspredicts ();
17164 /* Return nonzero when a QImode register that must be represented via a REX prefix is used. */
17167 x86_extended_QIreg_mentioned_p (rtx insn)
17170 extract_insn_cached (insn);
17171 for (i = 0; i < recog_data.n_operands; i++)
17172 if (REG_P (recog_data.operand[i])
17173 && REGNO (recog_data.operand[i]) >= 4)
17178 /* Return nonzero when P points to a register encoded via a REX prefix.
17179 Called via for_each_rtx. */
17181 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
17183 unsigned int regno;
17186 regno = REGNO (*p);
17187 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
17190 /* Return true when INSN mentions a register that must be encoded using a REX prefix. */
17193 x86_extended_reg_mentioned_p (rtx insn)
17195 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
17198 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
17199 optabs would emit if we didn't have TFmode patterns. */
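/* In C, the expansion below is roughly the following (a sketch for DImode
input, not the literal RTL):

if ((int64_t) x >= 0)
result = (double) (int64_t) x;
else
{
uint64_t half = (x >> 1) | (x & 1);
result = (double) (int64_t) half;
result += result;
}

OR-ing the shifted-out low bit back in keeps the final rounding
correct. */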
17202 x86_emit_floatuns (rtx operands[2])
17204 rtx neglab, donelab, i0, i1, f0, in, out;
17205 enum machine_mode mode, inmode;
17207 inmode = GET_MODE (operands[1]);
17208 gcc_assert (inmode == SImode || inmode == DImode);
17211 in = force_reg (inmode, operands[1]);
17212 mode = GET_MODE (out);
17213 neglab = gen_label_rtx ();
17214 donelab = gen_label_rtx ();
17215 i1 = gen_reg_rtx (Pmode);
17216 f0 = gen_reg_rtx (mode);
17218 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
17220 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
17221 emit_jump_insn (gen_jump (donelab));
17224 emit_label (neglab);
17226 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17227 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17228 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
17229 expand_float (f0, i0, 0);
17230 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
17232 emit_label (donelab);
17235 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17236 with all elements equal to VAR. Return true if successful. */
17239 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
17240 rtx target, rtx val)
17242 enum machine_mode smode, wsmode, wvmode;
17249 if (!mmx_ok && !TARGET_SSE)
17257 val = force_reg (GET_MODE_INNER (mode), val);
17258 x = gen_rtx_VEC_DUPLICATE (mode, val);
17259 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17265 if (TARGET_SSE || TARGET_3DNOW_A)
17267 val = gen_lowpart (SImode, val);
17268 x = gen_rtx_TRUNCATE (HImode, val);
17269 x = gen_rtx_VEC_DUPLICATE (mode, x);
17270 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17299 /* Replicate the value once into the next wider mode and recurse. */
17300 val = convert_modes (wsmode, smode, val, true);
17301 x = expand_simple_binop (wsmode, ASHIFT, val,
17302 GEN_INT (GET_MODE_BITSIZE (smode)),
17303 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17304 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
17306 x = gen_reg_rtx (wvmode);
17307 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
17308 gcc_unreachable ();
17309 emit_move_insn (target, gen_lowpart (mode, x));
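/* Sketch of the widening step above for V8QImode: the QImode value V is
replicated into an HImode word as (V << 8) | V, that word is broadcast
through the V4HImode case, and the V8QImode result is the same bits seen
through gen_lowpart. */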
17317 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17318 whose low element is VAR, and other elements are zero. Return true if successful. */
17322 ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode,
17323 rtx target, rtx var)
17325 enum machine_mode vsimode;
17332 if (!mmx_ok && !TARGET_SSE)
17338 var = force_reg (GET_MODE_INNER (mode), var);
17339 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
17340 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17345 var = force_reg (GET_MODE_INNER (mode), var);
17346 x = gen_rtx_VEC_DUPLICATE (mode, var);
17347 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
17348 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17353 vsimode = V4SImode;
17359 vsimode = V2SImode;
17362 /* Zero extend the variable element to SImode and recurse. */
17363 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
17365 x = gen_reg_rtx (vsimode);
17366 if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var))
17367 gcc_unreachable ();
17369 emit_move_insn (target, gen_lowpart (mode, x));
17377 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17378 consisting of the values in VALS. It is known that all elements
17379 except ONE_VAR are constants. Return true if successful. */
17382 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
17383 rtx target, rtx vals, int one_var)
17385 rtx var = XVECEXP (vals, 0, one_var);
17386 enum machine_mode wmode;
17389 const_vec = copy_rtx (vals);
17390 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
17391 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
17399 /* For the two-element vectors, it's just as easy to use
17400 the general case. */
17416 /* There's no way to set one QImode entry easily. Combine
17417 the variable value with its adjacent constant value, and
17418 promote to an HImode set. */
17419 x = XVECEXP (vals, 0, one_var ^ 1);
17422 var = convert_modes (HImode, QImode, var, true);
17423 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
17424 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17425 x = GEN_INT (INTVAL (x) & 0xff);
17429 var = convert_modes (HImode, QImode, var, true);
17430 x = gen_int_mode (INTVAL (x) << 8, HImode);
17432 if (x != const0_rtx)
17433 var = expand_simple_binop (HImode, IOR, var, x, var,
17434 1, OPTAB_LIB_WIDEN);
17436 x = gen_reg_rtx (wmode);
17437 emit_move_insn (x, gen_lowpart (wmode, const_vec));
17438 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
17440 emit_move_insn (target, gen_lowpart (mode, x));
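/* Example of the HImode combining above: to build a V8QImode vector whose
only variable element is element 5, the variable byte is shifted into the
high half of an HImode value, IOR-ed with the constant byte from element
4, and then stored into word 2 (one_var >> 1) of the V4HImode view of the
constant vector. */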
17447 emit_move_insn (target, const_vec);
17448 ix86_expand_vector_set (mmx_ok, target, var, one_var);
17452 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
17453 all values variable, and none identical. */
17456 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
17457 rtx target, rtx vals)
17459 enum machine_mode half_mode = GET_MODE_INNER (mode);
17460 rtx op0 = NULL, op1 = NULL;
17461 bool use_vec_concat = false;
17467 if (!mmx_ok && !TARGET_SSE)
17473 /* For the two-element vectors, we always implement VEC_CONCAT. */
17474 op0 = XVECEXP (vals, 0, 0);
17475 op1 = XVECEXP (vals, 0, 1);
17476 use_vec_concat = true;
17480 half_mode = V2SFmode;
17483 half_mode = V2SImode;
17489 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
17490 Recurse to load the two halves. */
17492 op0 = gen_reg_rtx (half_mode);
17493 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
17494 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
17496 op1 = gen_reg_rtx (half_mode);
17497 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
17498 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
17500 use_vec_concat = true;
17511 gcc_unreachable ();
17514 if (use_vec_concat)
17516 if (!register_operand (op0, half_mode))
17517 op0 = force_reg (half_mode, op0);
17518 if (!register_operand (op1, half_mode))
17519 op1 = force_reg (half_mode, op1);
17521 emit_insn (gen_rtx_SET (VOIDmode, target,
17522 gen_rtx_VEC_CONCAT (mode, op0, op1)));
17526 int i, j, n_elts, n_words, n_elt_per_word;
17527 enum machine_mode inner_mode;
17528 rtx words[4], shift;
17530 inner_mode = GET_MODE_INNER (mode);
17531 n_elts = GET_MODE_NUNITS (mode);
17532 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
17533 n_elt_per_word = n_elts / n_words;
17534 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
17536 for (i = 0; i < n_words; ++i)
17538 rtx word = NULL_RTX;
17540 for (j = 0; j < n_elt_per_word; ++j)
17542 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
17543 elt = convert_modes (word_mode, inner_mode, elt, true);
17549 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
17550 word, 1, OPTAB_LIB_WIDEN);
17551 word = expand_simple_binop (word_mode, IOR, word, elt,
17552 word, 1, OPTAB_LIB_WIDEN);
17560 emit_move_insn (target, gen_lowpart (mode, words[0]));
17561 else if (n_words == 2)
17563 rtx tmp = gen_reg_rtx (mode);
17564 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
17565 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
17566 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
17567 emit_move_insn (target, tmp);
17569 else if (n_words == 4)
17571 rtx tmp = gen_reg_rtx (V4SImode);
17572 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
17573 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
17574 emit_move_insn (target, gen_lowpart (mode, tmp));
17577 gcc_unreachable ();
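/* Example of the word build above: initializing a V8HImode vector on a
32-bit target gives n_words == 4 and n_elt_per_word == 2; each SImode
word is assembled as (hi_elt << 16) | lo_elt, and the four words are then
recombined through the V4SImode path. */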
17581 /* Initialize vector TARGET via VALS. Suppress the use of MMX
17582 instructions unless MMX_OK is true. */
17585 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
17587 enum machine_mode mode = GET_MODE (target);
17588 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17589 int n_elts = GET_MODE_NUNITS (mode);
17590 int n_var = 0, one_var = -1;
17591 bool all_same = true, all_const_zero = true;
17595 for (i = 0; i < n_elts; ++i)
17597 x = XVECEXP (vals, 0, i);
17598 if (!CONSTANT_P (x))
17599 n_var++, one_var = i;
17600 else if (x != CONST0_RTX (inner_mode))
17601 all_const_zero = false;
17602 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
17606 /* Constants are best loaded from the constant pool. */
17609 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
17613 /* If all values are identical, broadcast the value. */
17615 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
17616 XVECEXP (vals, 0, 0)))
17619 /* Values where only one field is non-constant are best loaded from
17620 the pool and overwritten via move later. */
17623 if (all_const_zero && one_var == 0
17624 && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
17625 XVECEXP (vals, 0, 0)))
17628 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
17632 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
17636 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
17638 enum machine_mode mode = GET_MODE (target);
17639 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17640 bool use_vec_merge = false;
17649 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
17650 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
17652 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
17654 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
17655 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17665 /* For the two-element vectors, we implement a VEC_CONCAT with
17666 the extraction of the other element. */
17668 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
17669 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
17672 op0 = val, op1 = tmp;
17674 op0 = tmp, op1 = val;
17676 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
17677 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17685 use_vec_merge = true;
17689 /* tmp = target = A B C D */
17690 tmp = copy_to_reg (target);
17691 /* target = A A B B */
17692 emit_insn (gen_sse_unpcklps (target, target, target));
17693 /* target = X A B B */
17694 ix86_expand_vector_set (false, target, val, 0);
17695 /* target = A X C D */
17696 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17697 GEN_INT (1), GEN_INT (0),
17698 GEN_INT (2+4), GEN_INT (3+4)));
17702 /* tmp = target = A B C D */
17703 tmp = copy_to_reg (target);
17704 /* tmp = X B C D */
17705 ix86_expand_vector_set (false, tmp, val, 0);
17706 /* target = A B X D */
17707 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17708 GEN_INT (0), GEN_INT (1),
17709 GEN_INT (0+4), GEN_INT (3+4)));
17713 /* tmp = target = A B C D */
17714 tmp = copy_to_reg (target);
17715 /* tmp = X B C D */
17716 ix86_expand_vector_set (false, tmp, val, 0);
17717 /* target = A B C X */
17718 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17719 GEN_INT (0), GEN_INT (1),
17720 GEN_INT (2+4), GEN_INT (0+4)));
17724 gcc_unreachable ();
17729 /* Element 0 handled by vec_merge below. */
17732 use_vec_merge = true;
17738 /* With SSE2, use integer shuffles to swap element 0 and ELT,
17739 store into element 0, then shuffle them back. */
17743 order[0] = GEN_INT (elt);
17744 order[1] = const1_rtx;
17745 order[2] = const2_rtx;
17746 order[3] = GEN_INT (3);
17747 order[elt] = const0_rtx;
17749 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17750 order[1], order[2], order[3]));
17752 ix86_expand_vector_set (false, target, val, 0);
17754 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17755 order[1], order[2], order[3]));
17759 /* For SSE1, we have to reuse the V4SF code. */
17760 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
17761 gen_lowpart (SFmode, val), elt);
17766 use_vec_merge = TARGET_SSE2;
17769 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17780 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
17781 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
17782 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17786 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17788 emit_move_insn (mem, target);
17790 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17791 emit_move_insn (tmp, val);
17793 emit_move_insn (target, mem);
17798 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
17800 enum machine_mode mode = GET_MODE (vec);
17801 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17802 bool use_vec_extr = false;
17815 use_vec_extr = true;
17827 tmp = gen_reg_rtx (mode);
17828 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
17829 GEN_INT (elt), GEN_INT (elt),
17830 GEN_INT (elt+4), GEN_INT (elt+4)));
17834 tmp = gen_reg_rtx (mode);
17835 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
17839 gcc_unreachable ();
17842 use_vec_extr = true;
17857 tmp = gen_reg_rtx (mode);
17858 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
17859 GEN_INT (elt), GEN_INT (elt),
17860 GEN_INT (elt), GEN_INT (elt)));
17864 tmp = gen_reg_rtx (mode);
17865 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
17869 gcc_unreachable ();
17872 use_vec_extr = true;
17877 /* For SSE1, we have to reuse the V4SF code. */
17878 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
17879 gen_lowpart (V4SFmode, vec), elt);
17885 use_vec_extr = TARGET_SSE2;
17888 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17893 /* ??? Could extract the appropriate HImode element and shift. */
17900 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
17901 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
17903 /* Let the rtl optimizers know about the zero extension performed. */
17904 if (inner_mode == HImode)
17906 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
17907 target = gen_lowpart (SImode, target);
17910 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17914 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17916 emit_move_insn (mem, vec);
17918 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17919 emit_move_insn (target, tmp);
17923 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
17924 pattern to reduce; DEST is the destination; IN is the input vector. */
17927 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
17929 rtx tmp1, tmp2, tmp3;
17931 tmp1 = gen_reg_rtx (V4SFmode);
17932 tmp2 = gen_reg_rtx (V4SFmode);
17933 tmp3 = gen_reg_rtx (V4SFmode);
17935 emit_insn (gen_sse_movhlps (tmp1, in, in));
17936 emit_insn (fn (tmp2, tmp1, in));
17938 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
17939 GEN_INT (1), GEN_INT (1),
17940 GEN_INT (1+4), GEN_INT (1+4)));
17941 emit_insn (fn (dest, tmp2, tmp3));
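/* Walk-through, with IN == [a b c d] and FN an addition pattern: movhlps
makes tmp1 == [c d c d]; the first FN gives tmp2 == [a+c b+d c+c d+d];
the shufps broadcasts element 1, so tmp3 == [b+d b+d b+d b+d]; the final
FN leaves the complete reduction a+b+c+d in element 0 of DEST. */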
17944 /* Implements target hook vector_mode_supported_p. */
17946 ix86_vector_mode_supported_p (enum machine_mode mode)
17948 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
17950 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
17952 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
17954 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
17959 /* Worker function for TARGET_MD_ASM_CLOBBERS.
17961 We do this in the new i386 backend to maintain source compatibility
17962 with the old cc0-based compiler. */
17965 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
17966 tree inputs ATTRIBUTE_UNUSED,
17969 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
17971 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
17973 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
17978 /* Return true if this goes in large data/bss. */
17981 ix86_in_large_data_p (tree exp)
17983 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
17986 /* Functions are never large data. */
17987 if (TREE_CODE (exp) == FUNCTION_DECL)
17990 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
17992 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
17993 if (strcmp (section, ".ldata") == 0
17994 || strcmp (section, ".lbss") == 0)
18000 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
18002 /* If this is an incomplete type with size 0, then we can't put it
18003 in data because it might be too big when completed. */
18004 if (!size || size > ix86_section_threshold)
18011 ix86_encode_section_info (tree decl, rtx rtl, int first)
18013 default_encode_section_info (decl, rtl, first);
18015 if (TREE_CODE (decl) == VAR_DECL
18016 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
18017 && ix86_in_large_data_p (decl))
18018 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
18021 /* Worker function for REVERSE_CONDITION. */
18024 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
18026 return (mode != CCFPmode && mode != CCFPUmode
18027 ? reverse_condition (code)
18028 : reverse_condition_maybe_unordered (code));
18031 /* Output code to perform an x87 FP register move, from OPERANDS[1] to OPERANDS[0]. */
18035 output_387_reg_move (rtx insn, rtx *operands)
18037 if (REG_P (operands[1])
18038 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
18040 if (REGNO (operands[0]) == FIRST_STACK_REG
18041 && TARGET_USE_FFREEP)
18042 return "ffreep\t%y0";
18043 return "fstp\t%y0";
18045 if (STACK_TOP_P (operands[0]))
18046 return "fld%z1\t%y1";
18050 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
18051 the FP status register is set. */
18054 ix86_emit_fp_unordered_jump (rtx label)
18056 rtx reg = gen_reg_rtx (HImode);
18059 emit_insn (gen_x86_fnstsw_1 (reg));
18061 if (TARGET_USE_SAHF)
18063 emit_insn (gen_x86_sahf_1 (reg));
18065 temp = gen_rtx_REG (CCmode, FLAGS_REG);
18066 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
18070 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
18072 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18073 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
18076 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
18077 gen_rtx_LABEL_REF (VOIDmode, label),
18079 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
18080 emit_jump_insn (temp);
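/* For reference: fnstsw stores the FP status word into %ax, and C2 is bit
10 of that word, i.e. bit 2 of the high byte, which is why the non-SAHF
path tests the mask 0x04. With TARGET_USE_SAHF the high byte is copied
into EFLAGS instead, where C2 lands in the parity flag, which the
UNORDERED comparison tests. */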
18083 /* Output code to perform a log1p XFmode calculation. */
18085 void ix86_emit_i387_log1p (rtx op0, rtx op1)
18087 rtx label1 = gen_label_rtx ();
18088 rtx label2 = gen_label_rtx ();
18090 rtx tmp = gen_reg_rtx (XFmode);
18091 rtx tmp2 = gen_reg_rtx (XFmode);
18093 emit_insn (gen_absxf2 (tmp, op1));
18094 emit_insn (gen_cmpxf (tmp,
18095 CONST_DOUBLE_FROM_REAL_VALUE (
18096 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
18098 emit_jump_insn (gen_bge (label1));
18100 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
18101 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
18102 emit_jump (label2);
18104 emit_label (label1);
18105 emit_move_insn (tmp, CONST1_RTX (XFmode));
18106 emit_insn (gen_addxf3 (tmp, op1, tmp));
18107 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
18108 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
18110 emit_label (label2);
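/* Note on the threshold used above: 0.29289... is 1 - sqrt(2)/2, the
documented domain bound of fyl2xp1. For |x| below it, fyl2xp1 computes
ln2 * log2 (x + 1) without the cancellation that explicitly forming
x + 1 would cause; for larger |x| the cancellation is harmless and the
fyl2x path computes ln2 * log2 (1 + x) instead. Both branches therefore
produce log1p (x) == ln (1 + x), with fldln2 supplying the ln2 factor. */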
18113 /* Solaris named-section hook. Parameters are as for
18114 named_section_real. */
18117 i386_solaris_elf_named_section (const char *name, unsigned int flags,
18120 /* With Binutils 2.15, the "@unwind" marker must be specified on
18121 every occurrence of the ".eh_frame" section, not just the first one. */
18124 && strcmp (name, ".eh_frame") == 0)
18126 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
18127 flags & SECTION_WRITE ? "aw" : "a");
18130 default_elf_asm_named_section (name, flags, decl);
18133 /* Return the mangling of TYPE if it is an extended fundamental type. */
18135 static const char *
18136 ix86_mangle_fundamental_type (tree type)
18138 switch (TYPE_MODE (type))
18141 /* __float128 is "g". */
18144 /* "long double" or __float80 is "e". */
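/* For instance, under the Itanium C++ ABI "void f (__float128)" mangles
to "_Z1fg" while "void f (long double)" mangles to "_Z1fe"; the strings
returned here provide the trailing type codes. */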
18151 /* For 32-bit code we can save PIC register setup by using
18152 __stack_chk_fail_local hidden function instead of calling
18153 __stack_chk_fail directly. 64-bit code doesn't need to setup any PIC
18154 register, so it is better to call __stack_chk_fail directly. */
18157 ix86_stack_protect_fail (void)
18159 return TARGET_64BIT
18160 ? default_external_stack_protect_fail ()
18161 : default_hidden_stack_protect_fail ();
18164 /* Select a format to encode pointers in exception handling data. CODE
18165 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
18166 true if the symbol may be affected by dynamic relocations.
18168 ??? All x86 object file formats are capable of representing this.
18169 After all, the relocation needed is the same as for the call insn.
18170 Whether or not a particular assembler allows us to enter such, I
18171 guess we'll have to see. */
18173 asm_preferred_eh_data_format (int code, int global)
18177 int type = DW_EH_PE_sdata8;
18179 || ix86_cmodel == CM_SMALL_PIC
18180 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
18181 type = DW_EH_PE_sdata4;
18182 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
18184 if (ix86_cmodel == CM_SMALL
18185 || (ix86_cmodel == CM_MEDIUM && code))
18186 return DW_EH_PE_udata4;
18187 return DW_EH_PE_absptr;
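/* For example, 32-bit PIC code encodes a reference to a preemptible global
symbol as DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4, while
32-bit non-PIC code with the default code model falls back to
DW_EH_PE_absptr. */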
18190 #include "gt-i386.h"