/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "tree-gimple.h"
#include "tm-constrs.h"
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
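
/* Illustrative use: the per-mode cost rows in the processor_costs tables
   below are indexed as cost->mult_init[MODE_INDEX (mode)], so SImode picks
   slot 2 and anything wider than DImode falls into the "other" slot 4.
   (The field name is quoted for illustration only; see the tables below.)  */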
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
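
/* Under those assumptions COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1),
   i.e. a two-byte add is the unit cost, so when tuning for size the
   "cost" of an insn is simply its encoded length in bytes, expressed on
   the same scale as the time-based tables below.  */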
#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
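
/* Reading the stringop initializers below (the struct definition itself is
   not visible in this excerpt): each entry pairs a fallback algorithm for
   blocks of unknown size with a list of {max_size, algorithm} ranges ended
   by a -1 sentinel, and the 32-bit and 64-bit variants come as a pair.
   DUMMY_STRINGOP_ALGS fills whichever slot a given tuning never consults.  */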
struct processor_costs size_cost = { /* costs for tuning for size */
  COSTS_N_BYTES (2), /* cost of an add instruction */
  COSTS_N_BYTES (3), /* cost of a lea instruction */
  COSTS_N_BYTES (2), /* variable shift costs */
  COSTS_N_BYTES (3), /* constant shift costs */
  {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
   COSTS_N_BYTES (3), /* HI */
   COSTS_N_BYTES (3), /* SI */
   COSTS_N_BYTES (3), /* DI */
   COSTS_N_BYTES (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3), /* HI */
   COSTS_N_BYTES (3), /* SI */
   COSTS_N_BYTES (3), /* DI */
   COSTS_N_BYTES (5)}, /* other */
  COSTS_N_BYTES (3), /* cost of movsx */
  COSTS_N_BYTES (3), /* cost of movzx */
  2, /* cost for loading QImode using movzbl */
  {2, 2, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 2, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 2}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {2, 2, 2}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  3, /* cost of moving MMX register */
  {3, 3}, /* cost of loading MMX registers
             in SImode and DImode */
  {3, 3}, /* cost of storing MMX registers
             in SImode and DImode */
  3, /* cost of moving SSE register */
  {3, 3, 3}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {3, 3, 3}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  COSTS_N_BYTES (2), /* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2), /* cost of FMUL instruction.  */
  COSTS_N_BYTES (2), /* cost of FDIV instruction.  */
  COSTS_N_BYTES (2), /* cost of FABS instruction.  */
  COSTS_N_BYTES (2), /* cost of FCHS instruction.  */
  COSTS_N_BYTES (2), /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}
};
/* Processor costs (relative to an add) */
struct processor_costs i386_cost = { /* 386 specific costs */
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (3), /* variable shift costs */
  COSTS_N_INSNS (2), /* constant shift costs */
  {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
   COSTS_N_INSNS (6), /* HI */
   COSTS_N_INSNS (6), /* SI */
   COSTS_N_INSNS (6), /* DI */
   COSTS_N_INSNS (6)}, /* other */
  COSTS_N_INSNS (1), /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23), /* HI */
   COSTS_N_INSNS (23), /* SI */
   COSTS_N_INSNS (23), /* DI */
   COSTS_N_INSNS (23)}, /* other */
  COSTS_N_INSNS (3), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  15, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {8, 8, 8}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {8, 8, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
                 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
                 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  COSTS_N_INSNS (23), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (88), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (22), /* cost of FABS instruction.  */
  COSTS_N_INSNS (24), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (122), /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
};
struct processor_costs i486_cost = { /* 486 specific costs */
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (3), /* variable shift costs */
  COSTS_N_INSNS (2), /* constant shift costs */
  {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
   COSTS_N_INSNS (12), /* HI */
   COSTS_N_INSNS (12), /* SI */
   COSTS_N_INSNS (12), /* DI */
   COSTS_N_INSNS (12)}, /* other */
  1, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40), /* HI */
   COSTS_N_INSNS (40), /* SI */
   COSTS_N_INSNS (40), /* DI */
   COSTS_N_INSNS (40)}, /* other */
  COSTS_N_INSNS (3), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  15, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {8, 8, 8}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {8, 8, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
                 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
                 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  COSTS_N_INSNS (8), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (73), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3), /* cost of FABS instruction.  */
  COSTS_N_INSNS (3), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (83), /* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
};
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (4), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
   COSTS_N_INSNS (11), /* HI */
   COSTS_N_INSNS (11), /* SI */
   COSTS_N_INSNS (11), /* DI */
   COSTS_N_INSNS (11)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25), /* HI */
   COSTS_N_INSNS (25), /* SI */
   COSTS_N_INSNS (25), /* DI */
   COSTS_N_INSNS (25)}, /* other */
  COSTS_N_INSNS (3), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  8, /* "large" insn */
  6, /* cost for loading QImode using movzbl */
  {2, 4, 2}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 4, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  8, /* cost of moving MMX register */
  {8, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {8, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 8, 16}, /* cost of loading SSE registers
                 in SImode, DImode and TImode */
  {4, 8, 16}, /* cost of storing SSE registers
                 in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  0, /* size of prefetch block */
  0, /* number of parallel prefetches */
  COSTS_N_INSNS (3), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (39), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1), /* cost of FABS instruction.  */
  COSTS_N_INSNS (1), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (70), /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
};
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (4), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (4)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17), /* HI */
   COSTS_N_INSNS (17), /* SI */
   COSTS_N_INSNS (17), /* DI */
   COSTS_N_INSNS (17)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  2, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 2, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
             in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {2, 2, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  3, /* MMX or SSE register to integer */
  32, /* size of prefetch block */
  6, /* number of parallel prefetches */
  COSTS_N_INSNS (3), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56), /* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
};
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (2), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (7), /* SI */
   COSTS_N_INSNS (7), /* DI */
   COSTS_N_INSNS (7)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23), /* HI */
   COSTS_N_INSNS (39), /* SI */
   COSTS_N_INSNS (39), /* DI */
   COSTS_N_INSNS (39)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  1, /* cost for loading QImode using movzbl */
  {1, 1, 1}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {1, 1, 1}, /* cost of storing integer registers */
  1, /* cost of reg,reg fld/fst */
  {1, 1, 1}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 6, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  1, /* cost of moving MMX register */
  {1, 1}, /* cost of loading MMX registers
             in SImode and DImode */
  {1, 1}, /* cost of storing MMX registers
             in SImode and DImode */
  1, /* cost of moving SSE register */
  {1, 1, 1}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {1, 1, 1}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  1, /* MMX or SSE register to integer */
  32, /* size of prefetch block */
  1, /* number of parallel prefetches */
  COSTS_N_INSNS (6), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (47), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1), /* cost of FABS instruction.  */
  COSTS_N_INSNS (1), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (54), /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
};
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (3), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (3), /* DI */
   COSTS_N_INSNS (3)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18), /* HI */
   COSTS_N_INSNS (18), /* SI */
   COSTS_N_INSNS (18), /* DI */
   COSTS_N_INSNS (18)}, /* other */
  COSTS_N_INSNS (2), /* cost of movsx */
  COSTS_N_INSNS (2), /* cost of movzx */
  8, /* "large" insn */
  3, /* cost for loading QImode using movzbl */
  {4, 5, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 3, 2}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {6, 6, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 4}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
             in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {2, 2, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  6, /* MMX or SSE register to integer */
  32, /* size of prefetch block */
  1, /* number of parallel prefetches */
  COSTS_N_INSNS (2), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56), /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
};
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
   COSTS_N_INSNS (5), /* HI */
   COSTS_N_INSNS (5), /* SI */
   COSTS_N_INSNS (5), /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {3, 4, 3}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {3, 4, 3}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {4, 4, 12}, /* cost of loading fp registers
                 in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {4, 4}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 4, 6}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 5}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  COSTS_N_INSNS (4), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (24), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35), /* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with the REP prefix (relative to
     loops) compared to K8.  Alignment becomes important after 8 bytes for
     memcpy and 128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
};
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (2), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (5)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {3, 4, 3}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {3, 4, 3}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {4, 4, 12}, /* cost of loading fp registers
                 in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {3, 3}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {4, 3, 6}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 5}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  64, /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set number of simultaneous prefetches
     to a large constant to reflect this (it probably is not a good idea not
     to limit number of prefetches at all, as their execution also takes some
     time).  */
  100, /* number of parallel prefetches */
  COSTS_N_INSNS (4), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35), /* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use loop.  For large blocks, libcall can
     do nontemporal accesses and beat inline considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (3), /* cost of a lea instruction */
  COSTS_N_INSNS (4), /* variable shift costs */
  COSTS_N_INSNS (4), /* constant shift costs */
  {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
   COSTS_N_INSNS (15), /* HI */
   COSTS_N_INSNS (15), /* SI */
   COSTS_N_INSNS (15), /* DI */
   COSTS_N_INSNS (15)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56), /* HI */
   COSTS_N_INSNS (56), /* SI */
   COSTS_N_INSNS (56), /* DI */
   COSTS_N_INSNS (56)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  16, /* "large" insn */
  2, /* cost for loading QImode using movzbl */
  {4, 5, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {2, 3, 2}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {2, 2, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 6}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {2, 2}, /* cost of loading MMX registers
             in SImode and DImode */
  {2, 2}, /* cost of storing MMX registers
             in SImode and DImode */
  12, /* cost of moving SSE register */
  {12, 12, 12}, /* cost of loading SSE registers
                   in SImode, DImode and TImode */
  {2, 2, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  10, /* MMX or SSE register to integer */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  COSTS_N_INSNS (5), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (43), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2), /* cost of FABS instruction.  */
  COSTS_N_INSNS (2), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (43), /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
};
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1), /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
   COSTS_N_INSNS (10), /* HI */
   COSTS_N_INSNS (10), /* SI */
   COSTS_N_INSNS (10), /* DI */
   COSTS_N_INSNS (10)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66), /* HI */
   COSTS_N_INSNS (66), /* SI */
   COSTS_N_INSNS (66), /* DI */
   COSTS_N_INSNS (66)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  16, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  3, /* cost of reg,reg fld/fst */
  {12, 12, 12}, /* cost of loading fp registers
                   in SFmode, DFmode and XFmode */
  {4, 4, 4}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  6, /* cost of moving MMX register */
  {12, 12}, /* cost of loading MMX registers
               in SImode and DImode */
  {12, 12}, /* cost of storing MMX registers
               in SImode and DImode */
  6, /* cost of moving SSE register */
  {12, 12, 12}, /* cost of loading SSE registers
                   in SImode, DImode and TImode */
  {12, 12, 12}, /* cost of storing SSE registers
                   in SImode, DImode and TImode */
  8, /* MMX or SSE register to integer */
  128, /* size of prefetch block */
  8, /* number of parallel prefetches */
  COSTS_N_INSNS (6), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (40), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3), /* cost of FABS instruction.  */
  COSTS_N_INSNS (3), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (44), /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
              {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (3), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (3), /* DI */
   COSTS_N_INSNS (3)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (22), /* HI */
   COSTS_N_INSNS (22), /* SI */
   COSTS_N_INSNS (22), /* DI */
   COSTS_N_INSNS (22)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  2, /* cost for loading QImode using movzbl */
  {6, 6, 6}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  2, /* cost of reg,reg fld/fst */
  {6, 6, 6}, /* cost of loading fp registers
                in SFmode, DFmode and XFmode */
  {4, 4, 4}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {6, 6}, /* cost of loading MMX registers
             in SImode and DImode */
  {4, 4}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {6, 6, 6}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {4, 4, 4}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  2, /* MMX or SSE register to integer */
  128, /* size of prefetch block */
  8, /* number of parallel prefetches */
  COSTS_N_INSNS (3), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (32), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1), /* cost of FABS instruction.  */
  COSTS_N_INSNS (1), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (58), /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};
/* Generic64 should produce code tuned for Nocona and K8.  */
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  /* On all chips taken into consideration lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results
     in using unnecessary temporary registers, causing regressions on
     several SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (2)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {12, 12, 12}, /* cost of loading fp registers
                   in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {8, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {8, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {8, 8, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {8, 8, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when
     this value is increased to the perhaps more appropriate value of 5.  */
  COSTS_N_INSNS (8), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8), /* cost of FABS instruction.  */
  COSTS_N_INSNS (8), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40), /* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};
/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1), /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
  COSTS_N_INSNS (1), /* variable shift costs */
  COSTS_N_INSNS (1), /* constant shift costs */
  {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
   COSTS_N_INSNS (4), /* HI */
   COSTS_N_INSNS (3), /* SI */
   COSTS_N_INSNS (4), /* DI */
   COSTS_N_INSNS (2)}, /* other */
  0, /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26), /* HI */
   COSTS_N_INSNS (42), /* SI */
   COSTS_N_INSNS (74), /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1), /* cost of movsx */
  COSTS_N_INSNS (1), /* cost of movzx */
  8, /* "large" insn */
  4, /* cost for loading QImode using movzbl */
  {4, 4, 4}, /* cost of loading integer registers
                in QImode, HImode and SImode.
                Relative to reg-reg move (2).  */
  {4, 4, 4}, /* cost of storing integer registers */
  4, /* cost of reg,reg fld/fst */
  {12, 12, 12}, /* cost of loading fp registers
                   in SFmode, DFmode and XFmode */
  {6, 6, 8}, /* cost of storing fp registers
                in SFmode, DFmode and XFmode */
  2, /* cost of moving MMX register */
  {8, 8}, /* cost of loading MMX registers
             in SImode and DImode */
  {8, 8}, /* cost of storing MMX registers
             in SImode and DImode */
  2, /* cost of moving SSE register */
  {8, 8, 8}, /* cost of loading SSE registers
                in SImode, DImode and TImode */
  {8, 8, 8}, /* cost of storing SSE registers
                in SImode, DImode and TImode */
  5, /* MMX or SSE register to integer */
  64, /* size of prefetch block */
  6, /* number of parallel prefetches */
  COSTS_N_INSNS (8), /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8), /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20), /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8), /* cost of FABS instruction.  */
  COSTS_N_INSNS (8), /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40), /* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
};

const struct processor_costs *ix86_cost = &pentium_cost;
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)
#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
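
/* A minimal sketch of how these masks are consumed (assuming the usual
   TUNEMASK-style test from i386.h; the exact macro name is an assumption
   here, not visible in this excerpt):

     #define TUNEMASK (1 << ix86_tune)
     ...
     if (x86_use_leave & TUNEMASK)
       ... the current tuning wants "leave" in the epilogue ...
*/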
/* Generic instruction choice should be a common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */

/* Leave does not affect Nocona SPEC2000 results negatively, so enabling it
   for Generic64 seems like a good code size tradeoff.  We can't enable it
   for 32bit generic because it does not work well with PPro base chips.  */
const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_zero_extend_with_and = m_486 | m_PENT;
const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
const int x86_double_with_add = ~m_386;
const int x86_use_bit_test = m_386;
const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_3dnow_a = m_ATHLON_K8;
const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
/* Branch hints were put in P4 based on simulation results.  But after P4
   was made, no performance benefit was observed with branch hints.  They
   also increase code size.  As a result, icc never generates branch
   hints.  */
const int x86_branch_hints = 0;
const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32; /* m_GENERIC | m_ATHLON_K8 ? */
/* We probably ought to watch for partial register stalls on the Generic32
   compilation setting as well.  However, in the current implementation
   partial register stalls are not eliminated very well -- they can be
   introduced via subregs synthesized by combine and can happen in
   caller/callee saving sequences.  Because this option pays back little on
   PPro based chips and is in conflict with the partial register
   dependencies used by Athlon/P4 based chips, it is better to leave it off
   for generic32 for now.  */
const int x86_partial_reg_stall = m_PPRO;
const int x86_partial_flag_reg_stall = m_CORE2 | m_GENERIC;
const int x86_use_himode_fiop = m_386 | m_486 | m_K6_GEODE;
const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
const int x86_use_mov0 = m_K6;
const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
const int x86_read_modify_write = ~m_PENT;
const int x86_read_modify = ~(m_PENT | m_PPRO);
const int x86_split_long_moves = m_PPRO;
const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
const int x86_qimode_math = ~(0);
const int x86_promote_qi_regs = 0;
/* On PPro this flag is meant to avoid partial register stalls.  Just like
   the x86_partial_reg_stall this option might be considered for Generic32
   if our scheme for avoiding partial stalls was more effective.  */
const int x86_himode_math = ~(m_PPRO);
const int x86_promote_hi_regs = m_PPRO;
const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_add_esp_4 = m_ATHLON_K8 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6_GEODE | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
const int x86_shift1 = ~m_486;
const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
/* In the generic model we have a conflict between PPro/Pentium4 based
   chips that treat 128bit SSE registers as single units and K8 based chips
   that divide SSE registers into two 64bit halves.
   x86_sse_partial_reg_dependency promotes all store destinations to 128bit
   to allow register renaming on 128bit SSE units, but usually results in
   one extra microop on 64bit SSE units.  Experimental results show that
   disabling this option on P4 brings over 20% SPECfp regression, while
   enabling it on K8 brings roughly 2.4% regression that can be partly
   masked by careful scheduling of moves.  */
const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
/* Set for machines where the type and dependencies are resolved on SSE
   register parts instead of whole registers, so we may maintain just the
   lower part of scalar values in proper format, leaving the upper part
   undefined.  */
const int x86_sse_split_regs = m_ATHLON_K8;
const int x86_sse_typeless_stores = m_ATHLON_K8;
const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
const int x86_use_ffreep = m_ATHLON_K8;
const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC);
/* ??? Allowing interunit moves makes it all too easy for the compiler to
   put integer data in xmm registers, which results in pretty abysmal
   code.  */
const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;

const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
/* Some CPU cores are not able to predict more than 4 branch instructions
   in the 16 byte window.  */
const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC;
const int x86_use_bt = m_ATHLON_K8;
/* Compare and exchange was added for 80486.  */
const int x86_cmpxchg = ~m_386;
/* Compare and exchange 8 bytes was added for pentium.  */
const int x86_cmpxchg8b = ~(m_386 | m_486);
/* Compare and exchange 16 bytes was added for nocona.  */
const int x86_cmpxchg16b = m_NOCONA;
/* Exchange and add was added for 80486.  */
const int x86_xadd = ~m_386;
/* Byteswap was added for 80486.  */
const int x86_bswap = ~m_386;
const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;
static enum stringop_alg stringop_alg = no_stringop;

/* In case the average insn count for single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue code.  */
#define FAST_PROLOGUE_INSN_COUNT 20

/* Names for 8 (low), 8 (high), and 16-bit registers, respectively.  */
static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  /* ax, dx, cx, bx */
  AREG, DREG, CREG, BREG,
  /* si, di, bp, sp */
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* flags, fpsr, fpcr, frame */
  NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
};
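
/* Thus REGNO_REG_CLASS maps, for example, hard register 0 (%eax) to AREG,
   while %ebp and %esp land in NON_Q_REGS since they have no addressable
   low byte in 32-bit mode.  (Illustration only; the table above is
   authoritative.)  */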
/* The "default" register map used in 32bit mode.  */

int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
  12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
  -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
  21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
  29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
  -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
};
static int const x86_64_int_parameter_registers[6] =
{
  5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
  FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
};

static int const x86_64_int_return_registers[4] =
{
  0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
};
1092 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1094 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1095 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1096 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1097 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1098 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1099 8,9,10,11,12,13,14,15, /* extended integer registers */
1100 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believes these numbers have these meanings.
	8 for %eip (no gcc equivalent)
	9 for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 is so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.
   The version of x86/svr4 SDB I have at the moment does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seems to under-
   stand that it should say that a variable lives in %st(0) (when
   asked via an `=' command) if we said it was in DWARF regno 11,
   but SDB still prints garbage when asked for the value of the
   variable in question (via a `/' command).
   (Also note that the labels SDB prints for various FP stack regs
   when doing an `x' command are all wrong.)
   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.
   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers.
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
*/
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
  11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
  -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
  21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
  29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
  -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
};
/* Test and compare insns in i386.md store the information needed to
   generate branch and scc insns here.  */

rtx ix86_compare_op0 = NULL_RTX;
rtx ix86_compare_op1 = NULL_RTX;
rtx ix86_compare_emitted = NULL_RTX;
/* Size of the register save area.  */
#define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
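
/* For illustration: with the x86-64 values REGPARM_MAX == 6 integer
   registers of UNITS_PER_WORD == 8 bytes and SSE_REGPARM_MAX == 8 SSE
   registers of 16 bytes each, this evaluates to 6*8 + 8*16 = 176 bytes.
   The constants quoted here are assumptions for the example; the macros
   above are authoritative.  */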
/* Define the structure for the machine field in struct function.  */

struct stack_local_entry GTY(())
{
  unsigned short mode;
  struct stack_local_entry *next;
};
/* Structure describing stack frame layout.
   Stack grows downward:

   saved frame pointer if frame_pointer_needed
					<- HARD_FRAME_POINTER
   [va_arg registers]  (
			> to_allocate	      <- FRAME_POINTER
  */
struct ix86_frame
{
  HOST_WIDE_INT frame;
  int outgoing_arguments_size;

  HOST_WIDE_INT to_allocate;
  /* The offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT hard_frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* When save_regs_using_mov is set, emit prologue using
     move instead of push instructions.  */
  bool save_regs_using_mov;
};
/* Code model option.  */
enum cmodel ix86_cmodel;
enum asm_dialect ix86_asm_dialect = ASM_ATT;
enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;

/* Which unit we are generating floating point math for.  */
enum fpmath_unit ix86_fpmath;

/* Which cpu are we scheduling for.  */
enum processor_type ix86_tune;
/* Which instruction set architecture to use.  */
enum processor_type ix86_arch;

/* True if the SSE prefetch instruction is not a NOP.  */
int x86_prefetch_sse;

/* ix86_regparm_string as a number */
static int ix86_regparm;

/* -mstackrealign option */
extern int ix86_force_align_arg_pointer;
static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";

/* Preferred alignment for stack boundary in bits.  */
unsigned int ix86_preferred_stack_boundary;

/* Values 1-5: see jump.c */
int ix86_branch_cost;

/* Variables which are this size or smaller are put in the data/bss
   or ldata/lbss sections.  */

int ix86_section_threshold = 65536;

/* Prefix built by ASM_GENERATE_INTERNAL_LABEL.  */
char internal_label_prefix[16];
int internal_label_prefix_len;
static bool ix86_handle_option (size_t, const char *, int);
static void output_pic_addr_const (FILE *, rtx, int);
static void put_condition_code (enum rtx_code, enum machine_mode,
				int, int, FILE *);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
						   rtx *);
static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
						   enum machine_mode);
static rtx get_thread_pointer (int);
static rtx legitimize_tls_address (rtx, enum tls_model, int);
static void get_pc_thunk_name (char [32], unsigned int);
static rtx gen_push (rtx);
static int ix86_flags_dependent (rtx, rtx, enum attr_type);
static int ix86_agi_dependent (rtx, rtx, enum attr_type);
static struct machine_function * ix86_init_machine_status (void);
static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
static int ix86_nsaved_regs (void);
static void ix86_emit_save_regs (void);
static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
static HOST_WIDE_INT ix86_GOT_alias_set (void);
static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
static int ix86_issue_rate (void);
static int ix86_adjust_cost (rtx, rtx, rtx, int);
static int ia32_multipass_dfa_lookahead (void);
static void ix86_init_mmx_sse_builtins (void);
static rtx x86_this_parameter (tree);
static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				 HOST_WIDE_INT, tree);
static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void x86_file_start (void);
static void ix86_reorg (void);
static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx *);
static tree ix86_build_builtin_va_list (void);
static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					 tree, int *, int);
static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
static bool ix86_scalar_mode_supported_p (enum machine_mode);
static bool ix86_vector_mode_supported_p (enum machine_mode);

static int ix86_address_cost (rtx);
static bool ix86_cannot_force_const_mem (rtx);
static rtx ix86_delegitimize_address (rtx);

static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
struct builtin_description;
static rtx ix86_expand_sse_comi (const struct builtin_description *,
				 tree, rtx);
static rtx ix86_expand_sse_compare (const struct builtin_description *,
				    tree, rtx);
static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_store_builtin (enum insn_code, tree);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
static int ix86_fp_comparison_cost (enum rtx_code code);
static unsigned int ix86_select_alt_pic_regnum (void);
static int ix86_save_reg (unsigned int, int);
static void ix86_compute_frame_layout (struct ix86_frame *);
static int ix86_comp_type_attributes (tree, tree);
static int ix86_function_regparm (tree, tree);
const struct attribute_spec ix86_attribute_table[];
static bool ix86_function_ok_for_sibcall (tree, tree);
static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
static int ix86_value_regno (enum machine_mode, tree, tree);
static bool contains_128bit_aligned_vector_p (tree);
static rtx ix86_struct_value_rtx (tree, int);
static bool ix86_ms_bitfield_layout_p (tree);
static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
static int extended_reg_mentioned_1 (rtx *, void *);
static bool ix86_rtx_costs (rtx, int, int, int *);
static int min_insn_size (rtx);
static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				    tree, bool);
static void ix86_init_builtins (void);
static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree ix86_builtin_vectorized_function (enum built_in_function, tree);
static const char *ix86_mangle_fundamental_type (tree);
static tree ix86_stack_protect_fail (void);
static rtx ix86_internal_arg_pointer (void);
static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);

/* This function is only used on Solaris.  */
static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
     ATTRIBUTE_UNUSED;
/* Register class used for passing given 64bit part of the argument.
   These represent classes as documented by the PS ABI, with the exception
   of SSESF, SSEDF classes, which are basically the SSE class; gcc just
   uses SF or DFmode moves instead of DImode to avoid reformatting
   penalties.

   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
   whenever possible (the upper half does contain padding).  */
enum x86_64_reg_class
  {
    X86_64_NO_CLASS,
    X86_64_INTEGER_CLASS,
    X86_64_INTEGERSI_CLASS,
    X86_64_SSE_CLASS,
    X86_64_SSESF_CLASS,
    X86_64_SSEDF_CLASS,
    X86_64_SSEUP_CLASS,
    X86_64_X87_CLASS,
    X86_64_X87UP_CLASS,
    X86_64_COMPLEX_X87_CLASS,
    X86_64_MEMORY_CLASS
  };
static const char * const x86_64_reg_class_name[] = {
  "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
  "sseup", "x87", "x87up", "cplx87", "no"
};

#define MAX_CLASSES 4
1396 /* Table of constants used by fldpi, fldln2, etc.  */
1397 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1398 static bool ext_80387_constants_init = 0;
1399 static void init_ext_80387_constants (void);
1400 static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
1401 static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
1402 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
1403 static section *x86_64_elf_select_section (tree decl, int reloc,
1404 unsigned HOST_WIDE_INT align)
1407 /* Initialize the GCC target structure. */
1408 #undef TARGET_ATTRIBUTE_TABLE
1409 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
1410 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1411 # undef TARGET_MERGE_DECL_ATTRIBUTES
1412 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
1415 #undef TARGET_COMP_TYPE_ATTRIBUTES
1416 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
1418 #undef TARGET_INIT_BUILTINS
1419 #define TARGET_INIT_BUILTINS ix86_init_builtins
1420 #undef TARGET_EXPAND_BUILTIN
1421 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
1422 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1423 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION ix86_builtin_vectorized_function
1425 #undef TARGET_ASM_FUNCTION_EPILOGUE
1426 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
1428 #undef TARGET_ENCODE_SECTION_INFO
1429 #ifndef SUBTARGET_ENCODE_SECTION_INFO
1430 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
1432 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
1435 #undef TARGET_ASM_OPEN_PAREN
1436 #define TARGET_ASM_OPEN_PAREN ""
1437 #undef TARGET_ASM_CLOSE_PAREN
1438 #define TARGET_ASM_CLOSE_PAREN ""
1440 #undef TARGET_ASM_ALIGNED_HI_OP
1441 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
1442 #undef TARGET_ASM_ALIGNED_SI_OP
1443 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
1445 #undef TARGET_ASM_ALIGNED_DI_OP
1446 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1449 #undef TARGET_ASM_UNALIGNED_HI_OP
1450 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1451 #undef TARGET_ASM_UNALIGNED_SI_OP
1452 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1453 #undef TARGET_ASM_UNALIGNED_DI_OP
1454 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1456 #undef TARGET_SCHED_ADJUST_COST
1457 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1458 #undef TARGET_SCHED_ISSUE_RATE
1459 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1460 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1461 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1462 ia32_multipass_dfa_lookahead
1464 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1465 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1468 #undef TARGET_HAVE_TLS
1469 #define TARGET_HAVE_TLS true
1471 #undef TARGET_CANNOT_FORCE_CONST_MEM
1472 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1473 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1474 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_rtx_true
1476 #undef TARGET_DELEGITIMIZE_ADDRESS
1477 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1479 #undef TARGET_MS_BITFIELD_LAYOUT_P
1480 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1483 #undef TARGET_BINDS_LOCAL_P
1484 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1487 #undef TARGET_ASM_OUTPUT_MI_THUNK
1488 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1489 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1490 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1492 #undef TARGET_ASM_FILE_START
1493 #define TARGET_ASM_FILE_START x86_file_start
1495 #undef TARGET_DEFAULT_TARGET_FLAGS
1496 #define TARGET_DEFAULT_TARGET_FLAGS \
1498 | TARGET_64BIT_DEFAULT \
1499 | TARGET_SUBTARGET_DEFAULT \
1500 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1502 #undef TARGET_HANDLE_OPTION
1503 #define TARGET_HANDLE_OPTION ix86_handle_option
1505 #undef TARGET_RTX_COSTS
1506 #define TARGET_RTX_COSTS ix86_rtx_costs
1507 #undef TARGET_ADDRESS_COST
1508 #define TARGET_ADDRESS_COST ix86_address_cost
1510 #undef TARGET_FIXED_CONDITION_CODE_REGS
1511 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1512 #undef TARGET_CC_MODES_COMPATIBLE
1513 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1515 #undef TARGET_MACHINE_DEPENDENT_REORG
1516 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1518 #undef TARGET_BUILD_BUILTIN_VA_LIST
1519 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1521 #undef TARGET_MD_ASM_CLOBBERS
1522 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1524 #undef TARGET_PROMOTE_PROTOTYPES
1525 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1526 #undef TARGET_STRUCT_VALUE_RTX
1527 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1528 #undef TARGET_SETUP_INCOMING_VARARGS
1529 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1530 #undef TARGET_MUST_PASS_IN_STACK
1531 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1532 #undef TARGET_PASS_BY_REFERENCE
1533 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1534 #undef TARGET_INTERNAL_ARG_POINTER
1535 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
1536 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
1537 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
1539 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1540 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1542 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1543 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
1545 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1546 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1549 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1550 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1553 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1554 #undef TARGET_INSERT_ATTRIBUTES
1555 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1558 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1559 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1561 #undef TARGET_STACK_PROTECT_FAIL
1562 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1564 #undef TARGET_FUNCTION_VALUE
1565 #define TARGET_FUNCTION_VALUE ix86_function_value
1567 struct gcc_target targetm = TARGET_INITIALIZER;
1570 /* The svr4 ABI for the i386 says that records and unions are returned
1572 #ifndef DEFAULT_PCC_STRUCT_RETURN
1573 #define DEFAULT_PCC_STRUCT_RETURN 1
1576 /* Implement TARGET_HANDLE_OPTION. */
1579 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1586 target_flags &= ~MASK_3DNOW_A;
1587 target_flags_explicit |= MASK_3DNOW_A;
1594 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1595 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1602 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1603 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1610 target_flags &= ~MASK_SSE3;
1611 target_flags_explicit |= MASK_SSE3;
1620 /* Sometimes certain combinations of command options do not make
1621 sense on a particular target machine. You can define a macro
1622 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1623 defined, is executed once just after all the command options have
1626 Don't use this macro to turn on various extra optimizations for
1627 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1630 override_options (void)
1633 int ix86_tune_defaulted = 0;
1635 /* Comes from final.c -- no real reason to change it. */
1636 #define MAX_CODE_ALIGN 16
1640 const struct processor_costs *cost; /* Processor costs */
1641 const int target_enable; /* Target flags to enable. */
1642 const int target_disable; /* Target flags to disable. */
1643 const int align_loop; /* Default alignments. */
1644 const int align_loop_max_skip;
1645 const int align_jump;
1646 const int align_jump_max_skip;
1647 const int align_func;
1649 const processor_target_table[PROCESSOR_max] =
1651 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1652 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1653 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1654 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1655 {&geode_cost, 0, 0, 0, 0, 0, 0, 0},
1656 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1657 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1658 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1659 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1660 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
1661 {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
1662 {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
1663 {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
1666 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1669 const char *const name; /* processor name or nickname. */
1670 const enum processor_type processor;
1671 const enum pta_flags
1677 PTA_PREFETCH_SSE = 16,
1684 const processor_alias_table[] =
1686 {"i386", PROCESSOR_I386, 0},
1687 {"i486", PROCESSOR_I486, 0},
1688 {"i586", PROCESSOR_PENTIUM, 0},
1689 {"pentium", PROCESSOR_PENTIUM, 0},
1690 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1691 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1692 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1693 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1694 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1695 {"i686", PROCESSOR_PENTIUMPRO, 0},
1696 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1697 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1698 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1699 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1700 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1701 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1702 | PTA_MMX | PTA_PREFETCH_SSE},
1703 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1704 | PTA_MMX | PTA_PREFETCH_SSE},
1705 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1706 | PTA_MMX | PTA_PREFETCH_SSE},
1707 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1708 | PTA_MMX | PTA_PREFETCH_SSE},
1709 {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3
1710 | PTA_64BIT | PTA_MMX
1711 | PTA_PREFETCH_SSE},
1712 {"geode", PROCESSOR_GEODE, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1714 {"k6", PROCESSOR_K6, PTA_MMX},
1715 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1716 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1717 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1719 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1720 | PTA_3DNOW | PTA_3DNOW_A},
1721 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1722 | PTA_3DNOW_A | PTA_SSE},
1723 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1724 | PTA_3DNOW_A | PTA_SSE},
1725 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1726 | PTA_3DNOW_A | PTA_SSE},
1727 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1728 | PTA_SSE | PTA_SSE2 },
1729 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1730 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1731 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1732 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1733 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1734 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1735 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1736 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1737 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
1738 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
1741 int const pta_size = ARRAY_SIZE (processor_alias_table);
1743 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1744 SUBTARGET_OVERRIDE_OPTIONS;
1747 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
1748 SUBSUBTARGET_OVERRIDE_OPTIONS;
1751 /* -fPIC is the default for x86_64. */
1752 if (TARGET_MACHO && TARGET_64BIT)
1755 /* Set the default values for switches whose default depends on TARGET_64BIT
1756 in case they weren't overridden by command line options. */
1759 /* Mach-O doesn't support omitting the frame pointer for now. */
1760 if (flag_omit_frame_pointer == 2)
1761 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
1762 if (flag_asynchronous_unwind_tables == 2)
1763 flag_asynchronous_unwind_tables = 1;
1764 if (flag_pcc_struct_return == 2)
1765 flag_pcc_struct_return = 0;
1769 if (flag_omit_frame_pointer == 2)
1770 flag_omit_frame_pointer = 0;
1771 if (flag_asynchronous_unwind_tables == 2)
1772 flag_asynchronous_unwind_tables = 0;
1773 if (flag_pcc_struct_return == 2)
1774 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1777 /* Need to check -mtune=generic first. */
1778 if (ix86_tune_string)
1780 if (!strcmp (ix86_tune_string, "generic")
1781 || !strcmp (ix86_tune_string, "i686")
1782 /* As special support for cross compilers, we read -mtune=native
1783 as -mtune=generic. With native compilers we won't see
1784 -mtune=native, since the driver will already have replaced it. */
1785 || !strcmp (ix86_tune_string, "native"))
1788 ix86_tune_string = "generic64";
1790 ix86_tune_string = "generic32";
1792 else if (!strncmp (ix86_tune_string, "generic", 7))
1793 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1797 if (ix86_arch_string)
1798 ix86_tune_string = ix86_arch_string;
1799 if (!ix86_tune_string)
1801 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1802 ix86_tune_defaulted = 1;
1805 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
1806 need to use a sensible tune option. */
1807 if (!strcmp (ix86_tune_string, "generic")
1808 || !strcmp (ix86_tune_string, "x86-64")
1809 || !strcmp (ix86_tune_string, "i686"))
1812 ix86_tune_string = "generic64";
1814 ix86_tune_string = "generic32";
1817 if (ix86_stringop_string)
1819 if (!strcmp (ix86_stringop_string, "rep_byte"))
1820 stringop_alg = rep_prefix_1_byte;
1821 else if (!strcmp (ix86_stringop_string, "libcall"))
1822 stringop_alg = libcall;
1823 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
1824 stringop_alg = rep_prefix_4_byte;
1825 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
1826 stringop_alg = rep_prefix_8_byte;
1827 else if (!strcmp (ix86_stringop_string, "byte_loop"))
1828 stringop_alg = loop_1_byte;
1829 else if (!strcmp (ix86_stringop_string, "loop"))
1830 stringop_alg = loop;
1831 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
1832 stringop_alg = unrolled_loop;
1834 error ("bad value (%s) for -mstringop-strategy= switch", ix86_stringop_string);
1836 if (!strcmp (ix86_tune_string, "x86-64"))
1837 warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
1838 "-mtune=generic instead as appropriate.");
1840 if (!ix86_arch_string)
1841 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1842 if (!strcmp (ix86_arch_string, "generic"))
1843 error ("generic CPU can be used only for -mtune= switch");
1844 if (!strncmp (ix86_arch_string, "generic", 7))
1845 error ("bad value (%s) for -march= switch", ix86_arch_string);
1847 if (ix86_cmodel_string != 0)
1849 if (!strcmp (ix86_cmodel_string, "small"))
1850 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1851 else if (!strcmp (ix86_cmodel_string, "medium"))
1852 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1854 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1855 else if (!strcmp (ix86_cmodel_string, "32"))
1856 ix86_cmodel = CM_32;
1857 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1858 ix86_cmodel = CM_KERNEL;
1859 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1860 ix86_cmodel = CM_LARGE;
1862 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1866 ix86_cmodel = CM_32;
1868 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1870 if (ix86_asm_string != 0)
1873 && !strcmp (ix86_asm_string, "intel"))
1874 ix86_asm_dialect = ASM_INTEL;
1875 else if (!strcmp (ix86_asm_string, "att"))
1876 ix86_asm_dialect = ASM_ATT;
1878 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1880 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1881 error ("code model %qs not supported in the %s bit mode",
1882 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1883 if (ix86_cmodel == CM_LARGE)
1884 sorry ("code model %<large%> not supported yet");
1885 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1886 sorry ("%i-bit mode not compiled in",
1887 (target_flags & MASK_64BIT) ? 64 : 32);
1889 for (i = 0; i < pta_size; i++)
1890 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1892 ix86_arch = processor_alias_table[i].processor;
1893 /* Default cpu tuning to the architecture. */
1894 ix86_tune = ix86_arch;
1895 if (processor_alias_table[i].flags & PTA_MMX
1896 && !(target_flags_explicit & MASK_MMX))
1897 target_flags |= MASK_MMX;
1898 if (processor_alias_table[i].flags & PTA_3DNOW
1899 && !(target_flags_explicit & MASK_3DNOW))
1900 target_flags |= MASK_3DNOW;
1901 if (processor_alias_table[i].flags & PTA_3DNOW_A
1902 && !(target_flags_explicit & MASK_3DNOW_A))
1903 target_flags |= MASK_3DNOW_A;
1904 if (processor_alias_table[i].flags & PTA_SSE
1905 && !(target_flags_explicit & MASK_SSE))
1906 target_flags |= MASK_SSE;
1907 if (processor_alias_table[i].flags & PTA_SSE2
1908 && !(target_flags_explicit & MASK_SSE2))
1909 target_flags |= MASK_SSE2;
1910 if (processor_alias_table[i].flags & PTA_SSE3
1911 && !(target_flags_explicit & MASK_SSE3))
1912 target_flags |= MASK_SSE3;
1913 if (processor_alias_table[i].flags & PTA_SSSE3
1914 && !(target_flags_explicit & MASK_SSSE3))
1915 target_flags |= MASK_SSSE3;
1916 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1917 x86_prefetch_sse = true;
1918 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1919 error ("CPU you selected does not support x86-64 "
1925 error ("bad value (%s) for -march= switch", ix86_arch_string);
1927 for (i = 0; i < pta_size; i++)
1928 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1930 ix86_tune = processor_alias_table[i].processor;
1931 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1933 if (ix86_tune_defaulted)
1935 ix86_tune_string = "x86-64";
1936 for (i = 0; i < pta_size; i++)
1937 if (! strcmp (ix86_tune_string,
1938 processor_alias_table[i].name))
1940 ix86_tune = processor_alias_table[i].processor;
1943 error ("CPU you selected does not support x86-64 "
1946 /* Intel CPUs have always interpreted SSE prefetch instructions as
1947 NOPs; so, we can enable SSE prefetch instructions even when
1948 -mtune (rather than -march) points us to a processor that has them.
1949 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1950 higher processors. */
1951 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1952 x86_prefetch_sse = true;
1956 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1959 ix86_cost = &size_cost;
1961 ix86_cost = processor_target_table[ix86_tune].cost;
1962 target_flags |= processor_target_table[ix86_tune].target_enable;
1963 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1965 /* Arrange to set up i386_stack_locals for all functions. */
1966 init_machine_status = ix86_init_machine_status;
1968 /* Validate -mregparm= value. */
1969 if (ix86_regparm_string)
1971 i = atoi (ix86_regparm_string);
1972 if (i < 0 || i > REGPARM_MAX)
1973 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1979 ix86_regparm = REGPARM_MAX;
1981 /* If the user has provided any of the -malign-* options,
1982 warn and use that value only if -falign-* is not set.
1983 Remove this code in GCC 3.2 or later. */
1984 if (ix86_align_loops_string)
1986 warning (0, "-malign-loops is obsolete, use -falign-loops");
1987 if (align_loops == 0)
1989 i = atoi (ix86_align_loops_string);
1990 if (i < 0 || i > MAX_CODE_ALIGN)
1991 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1993 align_loops = 1 << i;
1997 if (ix86_align_jumps_string)
1999 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
2000 if (align_jumps == 0)
2002 i = atoi (ix86_align_jumps_string);
2003 if (i < 0 || i > MAX_CODE_ALIGN)
2004 error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2006 align_jumps = 1 << i;
2010 if (ix86_align_funcs_string)
2012 warning (0, "-malign-functions is obsolete, use -falign-functions");
2013 if (align_functions == 0)
2015 i = atoi (ix86_align_funcs_string);
2016 if (i < 0 || i > MAX_CODE_ALIGN)
2017 error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2019 align_functions = 1 << i;
2023 /* Default align_* from the processor table. */
2024 if (align_loops == 0)
2026 align_loops = processor_target_table[ix86_tune].align_loop;
2027 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2029 if (align_jumps == 0)
2031 align_jumps = processor_target_table[ix86_tune].align_jump;
2032 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2034 if (align_functions == 0)
2036 align_functions = processor_target_table[ix86_tune].align_func;
2039 /* Validate -mbranch-cost= value, or provide default. */
2040 ix86_branch_cost = ix86_cost->branch_cost;
2041 if (ix86_branch_cost_string)
2043 i = atoi (ix86_branch_cost_string);
2045 error ("-mbranch-cost=%d is not between 0 and 5", i);
2047 ix86_branch_cost = i;
2049 if (ix86_section_threshold_string)
2051 i = atoi (ix86_section_threshold_string);
2053 error ("-mlarge-data-threshold=%d is negative", i);
2055 ix86_section_threshold = i;
2058 if (ix86_tls_dialect_string)
2060 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2061 ix86_tls_dialect = TLS_DIALECT_GNU;
2062 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
2063 ix86_tls_dialect = TLS_DIALECT_GNU2;
2064 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
2065 ix86_tls_dialect = TLS_DIALECT_SUN;
2067 error ("bad value (%s) for -mtls-dialect= switch",
2068 ix86_tls_dialect_string);
2071 /* Keep nonleaf frame pointers. */
2072 if (flag_omit_frame_pointer)
2073 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
2074 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
2075 flag_omit_frame_pointer = 1;
2077 /* If we're doing fast math, we don't care about comparison order
2078 wrt NaNs. This lets us use a shorter comparison sequence. */
2079 if (flag_finite_math_only)
2080 target_flags &= ~MASK_IEEE_FP;
2082 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
2083 since the insns won't need emulation. */
2084 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
2085 target_flags &= ~MASK_NO_FANCY_MATH_387;
2087 /* Likewise, if the target doesn't have a 387, or we've specified
2088 software floating point, don't use 387 inline intrinsics. */
2090 target_flags |= MASK_NO_FANCY_MATH_387;
2092 /* Turn on SSE3 builtins for -mssse3. */
2094 target_flags |= MASK_SSE3;
2096 /* Turn on SSE2 builtins for -msse3. */
2098 target_flags |= MASK_SSE2;
2100 /* Turn on SSE builtins for -msse2. */
2102 target_flags |= MASK_SSE;
2104 /* Turn on MMX builtins for -msse. */
2107 target_flags |= MASK_MMX & ~target_flags_explicit;
2108 x86_prefetch_sse = true;
2111 /* Turn on MMX builtins for 3Dnow. */
2113 target_flags |= MASK_MMX;
2117 if (TARGET_ALIGN_DOUBLE)
2118 error ("-malign-double makes no sense in the 64bit mode");
2120 error ("-mrtd calling convention not supported in the 64bit mode");
2122 /* Enable by default the SSE and MMX builtins. Do allow the user to
2123 explicitly disable any of these. In particular, disabling SSE and
2124 MMX for kernel code is extremely useful. */
2126 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
2127 & ~target_flags_explicit);
2131 /* The i386 ABI does not specify a red zone. It still makes sense to use it
2132 when the programmer takes care to keep the stack from being destroyed. */
2133 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
2134 target_flags |= MASK_NO_RED_ZONE;
2137 /* Validate -mpreferred-stack-boundary= value, or provide default.
2138 The default of 128 bits is for Pentium III's SSE __m128. We can't
2139 lower it even when optimizing for size; otherwise we couldn't mix object
2140 files compiled with -Os and -On. */
2141 ix86_preferred_stack_boundary = 128;
2142 if (ix86_preferred_stack_boundary_string)
2144 i = atoi (ix86_preferred_stack_boundary_string);
2145 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
2146 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
2147 TARGET_64BIT ? 4 : 2);
2149 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
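/* A worked example of the computation above: -mpreferred-stack-boundary=4
   yields (1 << 4) * BITS_PER_UNIT = 16 * 8 = 128 bits, i.e. the same
   16-byte boundary as the 128-bit default chosen above.  */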
2152 /* Accept -msseregparm only if at least SSE support is enabled. */
2153 if (TARGET_SSEREGPARM
2155 error ("-msseregparm used without SSE enabled");
2157 ix86_fpmath = TARGET_FPMATH_DEFAULT;
2159 if (ix86_fpmath_string != 0)
2161 if (! strcmp (ix86_fpmath_string, "387"))
2162 ix86_fpmath = FPMATH_387;
2163 else if (! strcmp (ix86_fpmath_string, "sse"))
2167 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2168 ix86_fpmath = FPMATH_387;
2171 ix86_fpmath = FPMATH_SSE;
2173 else if (! strcmp (ix86_fpmath_string, "387,sse")
2174 || ! strcmp (ix86_fpmath_string, "sse,387"))
2178 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2179 ix86_fpmath = FPMATH_387;
2181 else if (!TARGET_80387)
2183 warning (0, "387 instruction set disabled, using SSE arithmetics");
2184 ix86_fpmath = FPMATH_SSE;
2187 ix86_fpmath = FPMATH_SSE | FPMATH_387;
2190 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
2193 /* If the i387 is disabled, then do not return values in it. */
2195 target_flags &= ~MASK_FLOAT_RETURNS;
2197 if ((x86_accumulate_outgoing_args & TUNEMASK)
2198 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2200 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2202 /* ??? Unwind info is not correct around the CFG unless either a frame
2203 pointer is present or M_A_O_A is set. Fixing this requires rewriting
2204 unwind info generation to be aware of the CFG and propagating states
2206 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
2207 || flag_exceptions || flag_non_call_exceptions)
2208 && flag_omit_frame_pointer
2209 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2211 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2212 warning (0, "unwind tables currently require either a frame pointer "
2213 "or -maccumulate-outgoing-args for correctness");
2214 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2217 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
2220 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
2221 p = strchr (internal_label_prefix, 'X');
2222 internal_label_prefix_len = p - internal_label_prefix;
2226 /* When the scheduling description is not available, disable the scheduler
2227 pass so it won't slow down compilation and make x87 code slower. */
2228 if (!TARGET_SCHEDULE)
2229 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
2231 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
2232 set_param_value ("simultaneous-prefetches",
2233 ix86_cost->simultaneous_prefetches);
2234 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
2235 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
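/* Most numeric -m options above follow the same validate-then-apply
   pattern.  A minimal sketch of that pattern, kept disabled; the option
   name "-mexample=" and its variables are hypothetical:  */
#if 0
  if (ix86_example_string)
    {
      i = atoi (ix86_example_string);
      if (i < 0 || i > MAX_CODE_ALIGN)
	error ("-mexample=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
      else
	ix86_example = i;
    }
#endif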
2238 /* Switch to the appropriate section for output of DECL.
2239 DECL is either a `VAR_DECL' node or a constant of some sort.
2240 RELOC indicates whether forming the initial value of DECL requires
2241 link-time relocations. */
2244 x86_64_elf_select_section (tree decl, int reloc,
2245 unsigned HOST_WIDE_INT align)
2247 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2248 && ix86_in_large_data_p (decl))
2250 const char *sname = NULL;
2251 unsigned int flags = SECTION_WRITE;
2252 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2257 case SECCAT_DATA_REL:
2258 sname = ".ldata.rel";
2260 case SECCAT_DATA_REL_LOCAL:
2261 sname = ".ldata.rel.local";
2263 case SECCAT_DATA_REL_RO:
2264 sname = ".ldata.rel.ro";
2266 case SECCAT_DATA_REL_RO_LOCAL:
2267 sname = ".ldata.rel.ro.local";
2271 flags |= SECTION_BSS;
2274 case SECCAT_RODATA_MERGE_STR:
2275 case SECCAT_RODATA_MERGE_STR_INIT:
2276 case SECCAT_RODATA_MERGE_CONST:
2280 case SECCAT_SRODATA:
2287 /* We don't split these for the medium model. Place them into
2288 default sections and hope for the best. */
2293 /* We might get called with string constants, but get_named_section
2294 doesn't like them as they are not DECLs. Also, we need to set
2295 flags in that case. */
2297 return get_section (sname, flags, NULL);
2298 return get_named_section (decl, sname, reloc);
2301 return default_elf_select_section (decl, reloc, align);
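/* An illustration of the effect above (not authoritative): under
   -mcmodel=medium, a writable global larger than ix86_section_threshold
   whose initializer needs relocations lands in ".ldata.rel" instead of
   the default relocatable data section, keeping large data out of the
   small (low 2GB) sections.  */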
2304 /* Build up a unique section name, expressed as a
2305 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
2306 RELOC indicates whether the initial value of EXP requires
2307 link-time relocations. */
2310 x86_64_elf_unique_section (tree decl, int reloc)
2312 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2313 && ix86_in_large_data_p (decl))
2315 const char *prefix = NULL;
2316 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
2317 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
2319 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2322 case SECCAT_DATA_REL:
2323 case SECCAT_DATA_REL_LOCAL:
2324 case SECCAT_DATA_REL_RO:
2325 case SECCAT_DATA_REL_RO_LOCAL:
2326 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
2329 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
2332 case SECCAT_RODATA_MERGE_STR:
2333 case SECCAT_RODATA_MERGE_STR_INIT:
2334 case SECCAT_RODATA_MERGE_CONST:
2335 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
2337 case SECCAT_SRODATA:
2344 /* We don't split these for the medium model. Place them into
2345 default sections and hope for the best. */
2353 plen = strlen (prefix);
2355 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
2356 name = targetm.strip_name_encoding (name);
2357 nlen = strlen (name);
2359 string = alloca (nlen + plen + 1);
2360 memcpy (string, prefix, plen);
2361 memcpy (string + plen, name, nlen + 1);
2363 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
2367 default_unique_section (decl, reloc);
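/* A concrete example of the prefixing above: a variable "foo" in one of
   the SECCAT_DATA_REL* categories gets the unique section name
   ".ldata.foo", or ".gnu.linkonce.ld.foo" when it is a one-only copy.  */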
2370 #ifdef COMMON_ASM_OP
2371 /* This says how to output assembler code to declare an
2372 uninitialized external linkage data object.
2374 For medium model x86-64 we need to use the .largecomm directive for
2377 x86_elf_aligned_common (FILE *file,
2378 const char *name, unsigned HOST_WIDE_INT size,
2381 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2382 && size > (unsigned int)ix86_section_threshold)
2383 fprintf (file, ".largecomm\t");
2385 fprintf (file, "%s", COMMON_ASM_OP);
2386 assemble_name (file, name);
2387 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
2388 size, align / BITS_PER_UNIT);
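/* Illustrative output (assembler syntax may vary by subtarget): a
   4194304-byte object "big" aligned to 32 bytes under -mcmodel=medium
   is announced as
	.largecomm	big,4194304,32
   while smaller objects keep the ordinary COMMON_ASM_OP (".comm").  */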
2391 /* Utility function for targets to use in implementing
2392 ASM_OUTPUT_ALIGNED_BSS. */
2395 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
2396 const char *name, unsigned HOST_WIDE_INT size,
2399 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2400 && size > (unsigned int)ix86_section_threshold)
2401 switch_to_section (get_named_section (decl, ".lbss", 0));
2403 switch_to_section (bss_section);
2404 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
2405 #ifdef ASM_DECLARE_OBJECT_NAME
2406 last_assemble_variable_decl = decl;
2407 ASM_DECLARE_OBJECT_NAME (file, name, decl);
2409 /* The standard thing is to just output a label for the object. */
2410 ASM_OUTPUT_LABEL (file, name);
2411 #endif /* ASM_DECLARE_OBJECT_NAME */
2412 ASM_OUTPUT_SKIP (file, size ? size : 1);
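/* A subtarget would typically wire this helper up roughly as follows
   (a sketch; the macro arguments mirror this function's parameters):  */
#if 0
#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
  x86_output_aligned_bss ((FILE), (DECL), (NAME), (SIZE), (ALIGN))
#endif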
2416 optimization_options (int level, int size ATTRIBUTE_UNUSED)
2418 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
2419 make the problem with not enough registers even worse. */
2420 #ifdef INSN_SCHEDULING
2422 flag_schedule_insns = 0;
2426 /* The Darwin libraries never set errno, so we might as well
2427 avoid calling them when that's the only reason we would. */
2428 flag_errno_math = 0;
2430 /* The default values of these switches depend on TARGET_64BIT,
2431 which is not known at this moment. Mark these values with 2 and
2432 let the user override them. If there is no command line option
2433 specifying them, we will set the defaults in override_options. */
2435 flag_omit_frame_pointer = 2;
2436 flag_pcc_struct_return = 2;
2437 flag_asynchronous_unwind_tables = 2;
2438 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
2439 SUBTARGET_OPTIMIZATION_OPTIONS;
2443 /* Table of valid machine attributes. */
2444 const struct attribute_spec ix86_attribute_table[] =
2446 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2447 /* Stdcall attribute says callee is responsible for popping arguments
2448 if they are not variable. */
2449 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2450 /* Fastcall attribute says callee is responsible for popping arguments
2451 if they are not variable. */
2452 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2453 /* Cdecl attribute says the callee is a normal C declaration. */
2454 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2455 /* Regparm attribute specifies how many integer arguments are to be
2456 passed in registers. */
2457 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
2458 /* Sseregparm attribute says we are using x86_64 calling conventions
2459 for FP arguments. */
2460 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2461 /* force_align_arg_pointer says this function realigns the stack at entry. */
2462 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
2463 false, true, true, ix86_handle_cconv_attribute },
2464 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2465 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2466 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2467 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
2469 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2470 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2471 #ifdef SUBTARGET_ATTRIBUTE_TABLE
2472 SUBTARGET_ATTRIBUTE_TABLE,
2474 { NULL, 0, 0, false, false, false, NULL }
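/* For reference, the calling-convention entries above correspond to C
   source usage like the following (a sketch):  */
#if 0
int __attribute__ ((stdcall)) f1 (int a, int b);	/* callee pops args */
int __attribute__ ((fastcall)) f2 (int a, int b);	/* args in %ecx/%edx */
int __attribute__ ((regparm (3))) f3 (int a, int b, int c); /* %eax/%edx/%ecx */
#endif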
2477 /* Decide whether we can make a sibling call to a function. DECL is the
2478 declaration of the function being targeted by the call and EXP is the
2479 CALL_EXPR representing the call. */
2482 ix86_function_ok_for_sibcall (tree decl, tree exp)
2487 /* If we are generating position-independent code, we cannot sibcall
2488 optimize any indirect call, or a direct call to a global function,
2489 as the PLT requires %ebx be live. */
2490 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
2497 func = TREE_TYPE (TREE_OPERAND (exp, 0));
2498 if (POINTER_TYPE_P (func))
2499 func = TREE_TYPE (func);
2502 /* Check that the return value locations are the same. For instance,
2503 if we are returning floats on the 80387 register stack, we cannot
2504 make a sibcall from a function that doesn't return a float to a
2505 function that does or, conversely, from a function that does return
2506 a float to a function that doesn't; the necessary stack adjustment
2507 would not be executed. This is also the place we notice
2508 differences in the return value ABI. Note that it is ok for one
2509 of the functions to have void return type as long as the return
2510 value of the other is passed in a register. */
2511 a = ix86_function_value (TREE_TYPE (exp), func, false);
2512 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
2514 if (STACK_REG_P (a) || STACK_REG_P (b))
2516 if (!rtx_equal_p (a, b))
2519 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
2521 else if (!rtx_equal_p (a, b))
2524 /* If this call is indirect, we'll need to be able to use a call-clobbered
2525 register for the address of the target function. Make sure that all
2526 such registers are not used for passing parameters. */
2527 if (!decl && !TARGET_64BIT)
2531 /* We're looking at the CALL_EXPR, we need the type of the function. */
2532 type = TREE_OPERAND (exp, 0); /* pointer expression */
2533 type = TREE_TYPE (type); /* pointer type */
2534 type = TREE_TYPE (type); /* function type */
2536 if (ix86_function_regparm (type, NULL) >= 3)
2538 /* ??? Need to count the actual number of registers to be used,
2539 not the possible number of registers. Fix later. */
2544 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2545 /* Dllimport'd functions are also called indirectly. */
2546 if (decl && DECL_DLLIMPORT_P (decl)
2547 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
2551 /* If we force-aligned the stack, then sibcalling would unalign the
2552 stack, which may break the called function. */
2553 if (cfun->machine->force_align_arg_pointer)
2556 /* Otherwise okay. That also includes certain types of indirect calls. */
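/* A hedged example of the PIC restriction checked above: in 32-bit PIC
   code a direct call to a global function goes through the PLT, which
   requires %ebx to hold the GOT pointer, so something like
	extern int helper (int);
	int wrapper (int x) { return helper (x); }
   cannot be sibcall-optimized with -fpic unless helper binds locally.  */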
2560 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
2561 calling convention attributes;
2562 arguments as in struct attribute_spec.handler. */
2565 ix86_handle_cconv_attribute (tree *node, tree name,
2567 int flags ATTRIBUTE_UNUSED,
2570 if (TREE_CODE (*node) != FUNCTION_TYPE
2571 && TREE_CODE (*node) != METHOD_TYPE
2572 && TREE_CODE (*node) != FIELD_DECL
2573 && TREE_CODE (*node) != TYPE_DECL)
2575 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2576 IDENTIFIER_POINTER (name));
2577 *no_add_attrs = true;
2581 /* Can combine regparm with all attributes but fastcall. */
2582 if (is_attribute_p ("regparm", name))
2586 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2588 error ("fastcall and regparm attributes are not compatible");
2591 cst = TREE_VALUE (args);
2592 if (TREE_CODE (cst) != INTEGER_CST)
2594 warning (OPT_Wattributes,
2595 "%qs attribute requires an integer constant argument",
2596 IDENTIFIER_POINTER (name));
2597 *no_add_attrs = true;
2599 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2601 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2602 IDENTIFIER_POINTER (name), REGPARM_MAX);
2603 *no_add_attrs = true;
2607 && lookup_attribute (ix86_force_align_arg_pointer_string,
2608 TYPE_ATTRIBUTES (*node))
2609 && compare_tree_int (cst, REGPARM_MAX-1))
2611 error ("%s functions limited to %d register parameters",
2612 ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
2620 warning (OPT_Wattributes, "%qs attribute ignored",
2621 IDENTIFIER_POINTER (name));
2622 *no_add_attrs = true;
2626 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2627 if (is_attribute_p ("fastcall", name))
2629 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2631 error ("fastcall and cdecl attributes are not compatible");
2633 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2635 error ("fastcall and stdcall attributes are not compatible");
2637 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2639 error ("fastcall and regparm attributes are not compatible");
2643 /* Can combine stdcall with fastcall (redundant), regparm and
2645 else if (is_attribute_p ("stdcall", name))
2647 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2649 error ("stdcall and cdecl attributes are not compatible");
2651 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2653 error ("stdcall and fastcall attributes are not compatible");
2657 /* Can combine cdecl with regparm and sseregparm. */
2658 else if (is_attribute_p ("cdecl", name))
2660 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2662 error ("stdcall and cdecl attributes are not compatible");
2664 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2666 error ("fastcall and cdecl attributes are not compatible");
2670 /* Can combine sseregparm with all attributes. */
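/* For example, the checks above diagnose mixed conventions such as the
   following (illustrative):  */
#if 0
void __attribute__ ((fastcall, cdecl)) bad (void);
/* error: fastcall and cdecl attributes are not compatible */
#endif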
2675 /* Return 0 if the attributes for two types are incompatible, 1 if they
2676 are compatible, and 2 if they are nearly compatible (which causes a
2677 warning to be generated). */
2680 ix86_comp_type_attributes (tree type1, tree type2)
2682 /* Check for mismatch of non-default calling convention. */
2683 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2685 if (TREE_CODE (type1) != FUNCTION_TYPE)
2688 /* Check for mismatched fastcall/regparm types. */
2689 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2690 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2691 || (ix86_function_regparm (type1, NULL)
2692 != ix86_function_regparm (type2, NULL)))
2695 /* Check for mismatched sseregparm types. */
2696 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2697 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2700 /* Check for mismatched return types (cdecl vs stdcall). */
2701 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2702 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2708 /* Return the regparm value for a function with the indicated TYPE and DECL.
2709 DECL may be NULL when calling function indirectly
2710 or considering a libcall. */
2713 ix86_function_regparm (tree type, tree decl)
2716 int regparm = ix86_regparm;
2717 bool user_convention = false;
2721 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2724 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2725 user_convention = true;
2728 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2731 user_convention = true;
2734 /* Use register calling convention for local functions when possible. */
2735 if (!TARGET_64BIT && !user_convention && decl
2736 && flag_unit_at_a_time && !profile_flag)
2738 struct cgraph_local_info *i = cgraph_local_info (decl);
2741 int local_regparm, globals = 0, regno;
2743 /* Make sure no regparm register is taken by a global register
2745 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2746 if (global_regs[local_regparm])
2748 /* We can't use regparm(3) for nested functions, as these use the
2749 static chain pointer in the third argument. */
2750 if (local_regparm == 3
2751 && decl_function_context (decl)
2752 && !DECL_NO_STATIC_CHAIN (decl))
2754 /* If the function realigns its stack pointer, the
2755 prologue will clobber %ecx. If we've already
2756 generated code for the callee, the callee
2757 DECL_STRUCT_FUNCTION is gone, so we fall back to
2758 scanning the attributes for the self-realigning
2760 if ((DECL_STRUCT_FUNCTION (decl)
2761 && DECL_STRUCT_FUNCTION (decl)->machine->force_align_arg_pointer)
2762 || (!DECL_STRUCT_FUNCTION (decl)
2763 && lookup_attribute (ix86_force_align_arg_pointer_string,
2764 TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
2766 /* Each global register variable increases register pressure, so the
2767 more global register variables there are, the less the regparm
2768 optimization can be used, unless the user explicitly requests it. */
2769 for (regno = 0; regno < 6; regno++)
2770 if (global_regs[regno])
2773 = globals < local_regparm ? local_regparm - globals : 0;
2775 if (local_regparm > regparm)
2776 regparm = local_regparm;
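/* An illustration of the local-function promotion above: with
   unit-at-a-time and no profiling, a file-local function such as
	static int add (int a, int b) { return a + b; }
   whose address never escapes may be given regparm(3) automatically,
   so its arguments arrive in %eax/%edx instead of on the stack.  */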
2783 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
2784 DFmode (2) arguments in SSE registers for a function with the
2785 indicated TYPE and DECL. DECL may be NULL when calling function
2786 indirectly or considering a libcall. Otherwise return 0. */
2789 ix86_function_sseregparm (tree type, tree decl)
2791 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2792 by the sseregparm attribute. */
2793 if (TARGET_SSEREGPARM
2795 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2800 error ("Calling %qD with attribute sseregparm without "
2801 "SSE/SSE2 enabled", decl);
2803 error ("Calling %qT with attribute sseregparm without "
2804 "SSE/SSE2 enabled", type);
2811 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
2812 (and DFmode for SSE2) arguments in SSE registers,
2813 even for 32-bit targets. */
2814 if (!TARGET_64BIT && decl
2815 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2817 struct cgraph_local_info *i = cgraph_local_info (decl);
2819 return TARGET_SSE2 ? 2 : 1;
2825 /* Return true if EAX is live at the start of the function. Used by
2826 ix86_expand_prologue to determine if we need special help before
2827 calling allocate_stack_worker. */
2830 ix86_eax_live_at_start_p (void)
2832 /* Cheat. Don't bother working forward from ix86_function_regparm
2833 to the function type to whether an actual argument is located in
2834 eax. Instead just look at cfg info, which is still close enough
2835 to correct at this point. This gives false positives for broken
2836 functions that might use uninitialized data that happens to be
2837 allocated in eax, but who cares? */
2838 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2841 /* Value is the number of bytes of arguments automatically
2842 popped when returning from a subroutine call.
2843 FUNDECL is the declaration node of the function (as a tree),
2844 FUNTYPE is the data type of the function (as a tree),
2845 or for a library call it is an identifier node for the subroutine name.
2846 SIZE is the number of bytes of arguments passed on the stack.
2848 On the 80386, the RTD insn may be used to pop them if the number
2849 of args is fixed, but if the number is variable then the caller
2850 must pop them all. RTD can't be used for library calls now
2851 because the library is compiled with the Unix compiler.
2852 Use of RTD is a selectable option, since it is incompatible with
2853 standard Unix calling sequences. If the option is not selected,
2854 the caller must always pop the args.
2856 The attribute stdcall is equivalent to RTD on a per-module basis. */
2859 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2861 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2863 /* Cdecl functions override -mrtd, and never pop the stack. */
2864 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2866 /* Stdcall and fastcall functions will pop the stack if not
2868 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2869 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2873 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2874 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2875 == void_type_node)))
2879 /* Lose any fake structure return argument if it is passed on the stack. */
2880 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2882 && !KEEP_AGGREGATE_RETURN_POINTER)
2884 int nregs = ix86_function_regparm (funtype, fundecl);
2887 return GET_MODE_SIZE (Pmode);
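/* Concrete cases of the above: a stdcall function taking two ints
   reports 8 here, so it returns with "ret $8"; its cdecl counterpart
   reports 0 and leaves the 8 bytes for the caller to pop.  */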
2893 /* Argument support functions. */
2895 /* Return true when a register may be used to pass function parameters. */
2897 ix86_function_arg_regno_p (int regno)
2901 return (regno < REGPARM_MAX
2902 || (TARGET_MMX && MMX_REGNO_P (regno)
2903 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2904 || (TARGET_SSE && SSE_REGNO_P (regno)
2905 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2907 if (TARGET_SSE && SSE_REGNO_P (regno)
2908 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2910 /* RAX is used as hidden argument to va_arg functions. */
2913 for (i = 0; i < REGPARM_MAX; i++)
2914 if (regno == x86_64_int_parameter_registers[i])
2919 /* Return true if we do not know how to pass TYPE solely in registers. */
2922 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2924 if (must_pass_in_stack_var_size_or_pad (mode, type))
2927 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2928 The layout_type routine is crafty and tries to trick us into passing
2929 currently unsupported vector types on the stack by using TImode. */
2930 return (!TARGET_64BIT && mode == TImode
2931 && type && TREE_CODE (type) != VECTOR_TYPE);
2934 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2935 for a call to a function whose data type is FNTYPE.
2936 For a library call, FNTYPE is 0. */
2939 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2940 tree fntype, /* tree ptr for function decl */
2941 rtx libname, /* SYMBOL_REF of library name or 0 */
2944 static CUMULATIVE_ARGS zero_cum;
2945 tree param, next_param;
2947 if (TARGET_DEBUG_ARG)
2949 fprintf (stderr, "\ninit_cumulative_args (");
2951 fprintf (stderr, "fntype code = %s, ret code = %s",
2952 tree_code_name[(int) TREE_CODE (fntype)],
2953 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2955 fprintf (stderr, "no fntype");
2958 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2963 /* Set up the number of registers to use for passing arguments. */
2964 cum->nregs = ix86_regparm;
2966 cum->sse_nregs = SSE_REGPARM_MAX;
2968 cum->mmx_nregs = MMX_REGPARM_MAX;
2969 cum->warn_sse = true;
2970 cum->warn_mmx = true;
2971 cum->maybe_vaarg = false;
2973 /* Use ecx and edx registers if function has fastcall attribute,
2974 else look for regparm information. */
2975 if (fntype && !TARGET_64BIT)
2977 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2983 cum->nregs = ix86_function_regparm (fntype, fndecl);
2986 /* Set up the number of SSE registers used for passing SFmode
2987 and DFmode arguments. Warn for mismatching ABI. */
2988 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2990 /* Determine if this function has variable arguments. This is
2991 indicated by the last argument being 'void_type_node' if there
2992 are no variable arguments. If there are variable arguments, then
2993 we won't pass anything in registers in 32-bit mode. */
2995 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2997 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2998 param != 0; param = next_param)
3000 next_param = TREE_CHAIN (param);
3001 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3011 cum->float_in_sse = 0;
3013 cum->maybe_vaarg = true;
3017 if ((!fntype && !libname)
3018 || (fntype && !TYPE_ARG_TYPES (fntype)))
3019 cum->maybe_vaarg = true;
3021 if (TARGET_DEBUG_ARG)
3022 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
3027 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
3028 But in the case of vector types, it is some vector mode.
3030 When we have only some of our vector ISA extensions enabled, there
3031 are some modes for which vector_mode_supported_p is false. For these
3032 modes, the generic vector support in gcc will choose some non-vector mode
3033 in order to implement the type. By computing the natural mode, we'll
3034 select the proper ABI location for the operand and not depend on whatever
3035 the middle-end decides to do with these vector types. */
3037 static enum machine_mode
3038 type_natural_mode (tree type)
3040 enum machine_mode mode = TYPE_MODE (type);
3042 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
3044 HOST_WIDE_INT size = int_size_in_bytes (type);
3045 if ((size == 8 || size == 16)
3046 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
3047 && TYPE_VECTOR_SUBPARTS (type) > 1)
3049 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
3051 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
3052 mode = MIN_MODE_VECTOR_FLOAT;
3054 mode = MIN_MODE_VECTOR_INT;
3056 /* Get the mode which has this inner mode and number of units. */
3057 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3058 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
3059 && GET_MODE_INNER (mode) == innermode)
3069 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
3070 this may not agree with the mode that the type system has chosen for the
3071 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
3072 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
3075 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
3080 if (orig_mode != BLKmode)
3081 tmp = gen_rtx_REG (orig_mode, regno);
3084 tmp = gen_rtx_REG (mode, regno);
3085 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
3086 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
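/* The BLKmode case above therefore yields RTL of the shape
	(parallel [(expr_list (reg:V4SF xmm0) (const_int 0))])
   (register and mode illustrative), telling callers the value lives in
   that register even though the type's own mode is BLKmode.  */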
3092 /* x86-64 register passing implementation. See the x86-64 PS ABI for details.
3093 The goal of this code is to classify each 8-byte chunk of the incoming
3094 argument by register class and assign registers accordingly. */
3096 /* Return the union class of CLASS1 and CLASS2.
3097 See the x86-64 PS ABI for details. */
3099 static enum x86_64_reg_class
3100 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
3102 /* Rule #1: If both classes are equal, this is the resulting class. */
3103 if (class1 == class2)
3106 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
3108 if (class1 == X86_64_NO_CLASS)
3110 if (class2 == X86_64_NO_CLASS)
3113 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
3114 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
3115 return X86_64_MEMORY_CLASS;
3117 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
3118 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
3119 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
3120 return X86_64_INTEGERSI_CLASS;
3121 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
3122 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
3123 return X86_64_INTEGER_CLASS;
3125 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
3127 if (class1 == X86_64_X87_CLASS
3128 || class1 == X86_64_X87UP_CLASS
3129 || class1 == X86_64_COMPLEX_X87_CLASS
3130 || class2 == X86_64_X87_CLASS
3131 || class2 == X86_64_X87UP_CLASS
3132 || class2 == X86_64_COMPLEX_X87_CLASS)
3133 return X86_64_MEMORY_CLASS;
3135 /* Rule #6: Otherwise class SSE is used. */
3136 return X86_64_SSE_CLASS;
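/* Worked examples of the rules above:
	merge (NO_CLASS,  SSE_CLASS)  -> SSE_CLASS	(rule #2)
	merge (INTEGERSI, SSESF)      -> INTEGERSI	(rule #4)
	merge (X87,       SSE_CLASS)  -> MEMORY	(rule #5)  */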
3139 /* Classify the argument of type TYPE and mode MODE.
3140 CLASSES will be filled by the register class used to pass each word
3141 of the operand. The number of words is returned. In case the parameter
3142 should be passed in memory, 0 is returned. As a special case for zero
3143 sized containers, classes[0] will be NO_CLASS and 1 is returned.
3145 BIT_OFFSET is used internally for handling records; it specifies the
3146 offset in bits, modulo 256, to avoid overflow cases.
3148 See the x86-64 PS ABI for details.
3152 classify_argument (enum machine_mode mode, tree type,
3153 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
3155 HOST_WIDE_INT bytes =
3156 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3157 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3159 /* Variable sized entities are always passed/returned in memory. */
3163 if (mode != VOIDmode
3164 && targetm.calls.must_pass_in_stack (mode, type))
3167 if (type && AGGREGATE_TYPE_P (type))
3171 enum x86_64_reg_class subclasses[MAX_CLASSES];
3173 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
3177 for (i = 0; i < words; i++)
3178 classes[i] = X86_64_NO_CLASS;
3180 /* Zero-sized arrays or structures are NO_CLASS. We return 0 to
3181 signal the memory class, so handle this as a special case. */
3184 classes[0] = X86_64_NO_CLASS;
3188 /* Classify each field of the record and merge the classes. */
3189 switch (TREE_CODE (type))
3192 /* And now merge the fields of the structure. */
3193 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3195 if (TREE_CODE (field) == FIELD_DECL)
3199 if (TREE_TYPE (field) == error_mark_node)
3202 /* Bitfields are always classified as integer. Handle them
3203 early, since later code would consider them to be
3204 misaligned integers. */
3205 if (DECL_BIT_FIELD (field))
3207 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3208 i < ((int_bit_position (field) + (bit_offset % 64))
3209 + tree_low_cst (DECL_SIZE (field), 0)
3212 merge_classes (X86_64_INTEGER_CLASS,
3217 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3218 TREE_TYPE (field), subclasses,
3219 (int_bit_position (field)
3220 + bit_offset) % 256);
3223 for (i = 0; i < num; i++)
3226 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3228 merge_classes (subclasses[i], classes[i + pos]);
3236 /* Arrays are handled as small records. */
3239 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
3240 TREE_TYPE (type), subclasses, bit_offset);
3244 /* The partial classes are now full classes. */
3245 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
3246 subclasses[0] = X86_64_SSE_CLASS;
3247 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
3248 subclasses[0] = X86_64_INTEGER_CLASS;
3250 for (i = 0; i < words; i++)
3251 classes[i] = subclasses[i % num];
3256 case QUAL_UNION_TYPE:
3257       /* Unions are similar to RECORD_TYPE but the offset is always 0.
3258 	 */
3259 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3261 if (TREE_CODE (field) == FIELD_DECL)
3265 if (TREE_TYPE (field) == error_mark_node)
3268 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3269 TREE_TYPE (field), subclasses,
3273 for (i = 0; i < num; i++)
3274 classes[i] = merge_classes (subclasses[i], classes[i]);
3283 /* Final merger cleanup. */
3284 for (i = 0; i < words; i++)
3286 	  /* If one class is MEMORY, everything should be passed in
3287 	     memory.  */
3288 if (classes[i] == X86_64_MEMORY_CLASS)
3291 	  /* The X86_64_SSEUP_CLASS should always be preceded by
3292 	     X86_64_SSE_CLASS.  */
3293 if (classes[i] == X86_64_SSEUP_CLASS
3294 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
3295 classes[i] = X86_64_SSE_CLASS;
3297 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
3298 if (classes[i] == X86_64_X87UP_CLASS
3299 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
3300 classes[i] = X86_64_SSE_CLASS;
3305   /* Compute the needed alignment.  We align all types to their natural
3306      boundaries, with the exception of XFmode, which is aligned to 64 bits.  */
3307 if (mode != VOIDmode && mode != BLKmode)
3309 int mode_alignment = GET_MODE_BITSIZE (mode);
3312 mode_alignment = 128;
3313 else if (mode == XCmode)
3314 mode_alignment = 256;
3315 if (COMPLEX_MODE_P (mode))
3316 mode_alignment /= 2;
3317 /* Misaligned fields are always returned in memory. */
3318 if (bit_offset % mode_alignment)
3322   /* For V1xx modes, just use the base mode.  */
3323 if (VECTOR_MODE_P (mode)
3324 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
3325 mode = GET_MODE_INNER (mode);
3327 /* Classification of atomic types. */
3332 classes[0] = X86_64_SSE_CLASS;
3335 classes[0] = X86_64_SSE_CLASS;
3336 classes[1] = X86_64_SSEUP_CLASS;
3345 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3346 classes[0] = X86_64_INTEGERSI_CLASS;
3348 classes[0] = X86_64_INTEGER_CLASS;
3352 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
3357 if (!(bit_offset % 64))
3358 classes[0] = X86_64_SSESF_CLASS;
3360 classes[0] = X86_64_SSE_CLASS;
3363 classes[0] = X86_64_SSEDF_CLASS;
3366 classes[0] = X86_64_X87_CLASS;
3367 classes[1] = X86_64_X87UP_CLASS;
3370 classes[0] = X86_64_SSE_CLASS;
3371 classes[1] = X86_64_SSEUP_CLASS;
3374 classes[0] = X86_64_SSE_CLASS;
3377 classes[0] = X86_64_SSEDF_CLASS;
3378 classes[1] = X86_64_SSEDF_CLASS;
3381 classes[0] = X86_64_COMPLEX_X87_CLASS;
3384       /* These modes are larger than 16 bytes.  */
3392 classes[0] = X86_64_SSE_CLASS;
3393 classes[1] = X86_64_SSEUP_CLASS;
3399 classes[0] = X86_64_SSE_CLASS;
3405 gcc_assert (VECTOR_MODE_P (mode));
3410 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
3412 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3413 classes[0] = X86_64_INTEGERSI_CLASS;
3415 classes[0] = X86_64_INTEGER_CLASS;
3416 classes[1] = X86_64_INTEGER_CLASS;
3417 return 1 + (bytes > 8);
3421 /* Examine the argument and return the number of registers required in
3422    each class.  Return 0 iff the parameter should be passed in memory.  */
3424 examine_argument (enum machine_mode mode, tree type, int in_return,
3425 int *int_nregs, int *sse_nregs)
3427 enum x86_64_reg_class class[MAX_CLASSES];
3428 int n = classify_argument (mode, type, class, 0);
3434 for (n--; n >= 0; n--)
3437 case X86_64_INTEGER_CLASS:
3438 case X86_64_INTEGERSI_CLASS:
3441 case X86_64_SSE_CLASS:
3442 case X86_64_SSESF_CLASS:
3443 case X86_64_SSEDF_CLASS:
3446 case X86_64_NO_CLASS:
3447 case X86_64_SSEUP_CLASS:
3449 case X86_64_X87_CLASS:
3450 case X86_64_X87UP_CLASS:
3454 case X86_64_COMPLEX_X87_CLASS:
3455 return in_return ? 2 : 0;
3456 case X86_64_MEMORY_CLASS:
3462 /* Construct a container for the argument used by the GCC interface.  See
3463    FUNCTION_ARG for the detailed description.  */
3466 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
3467 tree type, int in_return, int nintregs, int nsseregs,
3468 const int *intreg, int sse_regno)
3470 /* The following variables hold the static issued_error state. */
3471 static bool issued_sse_arg_error;
3472 static bool issued_sse_ret_error;
3473 static bool issued_x87_ret_error;
3475 enum machine_mode tmpmode;
3477 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3478 enum x86_64_reg_class class[MAX_CLASSES];
3482 int needed_sseregs, needed_intregs;
3483 rtx exp[MAX_CLASSES];
3486 n = classify_argument (mode, type, class, 0);
3487 if (TARGET_DEBUG_ARG)
3490 fprintf (stderr, "Memory class\n");
3493 fprintf (stderr, "Classes:");
3494 for (i = 0; i < n; i++)
3496 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
3498 fprintf (stderr, "\n");
3503 if (!examine_argument (mode, type, in_return, &needed_intregs,
3506 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
3509 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
3510 some less clueful developer tries to use floating-point anyway. */
3511 if (needed_sseregs && !TARGET_SSE)
3515 if (!issued_sse_ret_error)
3517 error ("SSE register return with SSE disabled");
3518 issued_sse_ret_error = true;
3521 else if (!issued_sse_arg_error)
3523 error ("SSE register argument with SSE disabled");
3524 issued_sse_arg_error = true;
3529 /* Likewise, error if the ABI requires us to return values in the
3530 x87 registers and the user specified -mno-80387. */
3531 if (!TARGET_80387 && in_return)
3532 for (i = 0; i < n; i++)
3533 if (class[i] == X86_64_X87_CLASS
3534 || class[i] == X86_64_X87UP_CLASS
3535 || class[i] == X86_64_COMPLEX_X87_CLASS)
3537 if (!issued_x87_ret_error)
3539 error ("x87 register return with x87 disabled");
3540 issued_x87_ret_error = true;
3545   /* First construct simple cases.  Avoid SCmode, since we want to use a
3546      single register to pass this type.  */
3547 if (n == 1 && mode != SCmode)
3550 case X86_64_INTEGER_CLASS:
3551 case X86_64_INTEGERSI_CLASS:
3552 return gen_rtx_REG (mode, intreg[0]);
3553 case X86_64_SSE_CLASS:
3554 case X86_64_SSESF_CLASS:
3555 case X86_64_SSEDF_CLASS:
3556 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
3557 case X86_64_X87_CLASS:
3558 case X86_64_COMPLEX_X87_CLASS:
3559 return gen_rtx_REG (mode, FIRST_STACK_REG);
3560 case X86_64_NO_CLASS:
3561 /* Zero sized array, struct or class. */
3566 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
3568 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
3570 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
3571 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
3572 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
3573 && class[1] == X86_64_INTEGER_CLASS
3574 && (mode == CDImode || mode == TImode || mode == TFmode)
3575 && intreg[0] + 1 == intreg[1])
3576 return gen_rtx_REG (mode, intreg[0]);
3578 /* Otherwise figure out the entries of the PARALLEL. */
3579 for (i = 0; i < n; i++)
3583 case X86_64_NO_CLASS:
3585 case X86_64_INTEGER_CLASS:
3586 case X86_64_INTEGERSI_CLASS:
3587 /* Merge TImodes on aligned occasions here too. */
3588 if (i * 8 + 8 > bytes)
3589 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3590 else if (class[i] == X86_64_INTEGERSI_CLASS)
3594 	  /* We've requested 24 bytes we don't have a mode for.  Use DImode.  */
3595 if (tmpmode == BLKmode)
3597 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3598 gen_rtx_REG (tmpmode, *intreg),
3602 case X86_64_SSESF_CLASS:
3603 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3604 gen_rtx_REG (SFmode,
3605 SSE_REGNO (sse_regno)),
3609 case X86_64_SSEDF_CLASS:
3610 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3611 gen_rtx_REG (DFmode,
3612 SSE_REGNO (sse_regno)),
3616 case X86_64_SSE_CLASS:
3617 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3621 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3622 gen_rtx_REG (tmpmode,
3623 SSE_REGNO (sse_regno)),
3625 if (tmpmode == TImode)
3634 /* Empty aligned struct, union or class. */
3638 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3639 for (i = 0; i < nexps; i++)
3640 XVECEXP (ret, 0, i) = exp [i];
3644 /* Update the data in CUM to advance over an argument
3645 of mode MODE and data type TYPE.
3646 (TYPE is null for libcalls where that information may not be available.) */
3649 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3650 tree type, int named)
3653 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3654 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3657 mode = type_natural_mode (type);
3659 if (TARGET_DEBUG_ARG)
3660 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3661 "mode=%s, named=%d)\n\n",
3662 words, cum->words, cum->nregs, cum->sse_nregs,
3663 GET_MODE_NAME (mode), named);
3667 int int_nregs, sse_nregs;
3668 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3669 cum->words += words;
3670 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3672 cum->nregs -= int_nregs;
3673 cum->sse_nregs -= sse_nregs;
3674 cum->regno += int_nregs;
3675 cum->sse_regno += sse_nregs;
3678 cum->words += words;
3696 cum->words += words;
3697 cum->nregs -= words;
3698 cum->regno += words;
3700 if (cum->nregs <= 0)
3708 if (cum->float_in_sse < 2)
3711 if (cum->float_in_sse < 1)
3722 if (!type || !AGGREGATE_TYPE_P (type))
3724 cum->sse_words += words;
3725 cum->sse_nregs -= 1;
3726 cum->sse_regno += 1;
3727 if (cum->sse_nregs <= 0)
3739 if (!type || !AGGREGATE_TYPE_P (type))
3741 cum->mmx_words += words;
3742 cum->mmx_nregs -= 1;
3743 cum->mmx_regno += 1;
3744 if (cum->mmx_nregs <= 0)
3755 /* Define where to put the arguments to a function.
3756 Value is zero to push the argument on the stack,
3757 or a hard register in which to store the argument.
3759 MODE is the argument's machine mode.
3760 TYPE is the data type of the argument (as a tree).
3761    This is null for libcalls where that information may
3762    not be available.
3763 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3764 the preceding args and about the function being called.
3765 NAMED is nonzero if this argument is a named parameter
3766 (otherwise it is an extra parameter matching an ellipsis). */
3769 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3770 tree type, int named)
3772 enum machine_mode mode = orig_mode;
3775 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3776 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3777 static bool warnedsse, warnedmmx;
3779 /* To simplify the code below, represent vector types with a vector mode
3780 even if MMX/SSE are not active. */
3781 if (type && TREE_CODE (type) == VECTOR_TYPE)
3782 mode = type_natural_mode (type);
3784 /* Handle a hidden AL argument containing number of registers for varargs
3785    x86-64 functions.  For the i386 ABI just return constm1_rtx to avoid
3786    any AL settings.  */
3787 if (mode == VOIDmode)
3790 return GEN_INT (cum->maybe_vaarg
3791 ? (cum->sse_nregs < 0
3799 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3801 &x86_64_int_parameter_registers [cum->regno],
3806 /* For now, pass fp/complex values on the stack. */
3818 if (words <= cum->nregs)
3820 int regno = cum->regno;
3822 /* Fastcall allocates the first two DWORD (SImode) or
3823 smaller arguments to ECX and EDX. */
3826 if (mode == BLKmode || mode == DImode)
3829 	  /* ECX, not EAX, is the first allocated register.  */
3833 ret = gen_rtx_REG (mode, regno);
3837 if (cum->float_in_sse < 2)
3840 if (cum->float_in_sse < 1)
3850 if (!type || !AGGREGATE_TYPE_P (type))
3852 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3855 warning (0, "SSE vector argument without SSE enabled "
3859 ret = gen_reg_or_parallel (mode, orig_mode,
3860 cum->sse_regno + FIRST_SSE_REG);
3867 if (!type || !AGGREGATE_TYPE_P (type))
3869 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3872 warning (0, "MMX vector argument without MMX enabled "
3876 ret = gen_reg_or_parallel (mode, orig_mode,
3877 cum->mmx_regno + FIRST_MMX_REG);
3882 if (TARGET_DEBUG_ARG)
3885 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3886 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3889 print_simple_rtl (stderr, ret);
3891 fprintf (stderr, ", stack");
3893 fprintf (stderr, " )\n");
3899 /* A C expression that indicates when an argument must be passed by
3900 reference. If nonzero for an argument, a copy of that argument is
3901 made in memory and a pointer to the argument is passed instead of
3902 the argument itself. The pointer is passed in whatever way is
3903 appropriate for passing a pointer to that type. */
3906 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3907 enum machine_mode mode ATTRIBUTE_UNUSED,
3908 tree type, bool named ATTRIBUTE_UNUSED)
3913 if (type && int_size_in_bytes (type) == -1)
3915 if (TARGET_DEBUG_ARG)
3916 fprintf (stderr, "function_arg_pass_by_reference\n");
3923 /* Return true when TYPE should be 128-bit aligned for the 32-bit argument
3924    passing ABI.  Only called if TARGET_SSE.  */
3926 contains_128bit_aligned_vector_p (tree type)
3928 enum machine_mode mode = TYPE_MODE (type);
3929 if (SSE_REG_MODE_P (mode)
3930 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3932 if (TYPE_ALIGN (type) < 128)
3935 if (AGGREGATE_TYPE_P (type))
3937 /* Walk the aggregates recursively. */
3938 switch (TREE_CODE (type))
3942 case QUAL_UNION_TYPE:
3946 /* Walk all the structure fields. */
3947 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3949 if (TREE_CODE (field) == FIELD_DECL
3950 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3957       /* Just for use if some languages pass arrays by value.  */
3958 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3969 /* Gives the alignment boundary, in bits, of an argument with the
3970 specified mode and type. */
3973 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3977 align = TYPE_ALIGN (type);
3979 align = GET_MODE_ALIGNMENT (mode);
3980 if (align < PARM_BOUNDARY)
3981 align = PARM_BOUNDARY;
3984   /* The i386 ABI defines all arguments to be 4 byte aligned.  We have to
3985      make an exception for SSE modes since these require 128-bit
3986      alignment.
3988 The handling here differs from field_alignment. ICC aligns MMX
3989 arguments to 4 byte boundaries, while structure fields are aligned
3990 to 8 byte boundaries. */
3992 align = PARM_BOUNDARY;
3995 if (!SSE_REG_MODE_P (mode))
3996 align = PARM_BOUNDARY;
4000 if (!contains_128bit_aligned_vector_p (type))
4001 align = PARM_BOUNDARY;
4009 /* Return true if N is a possible register number for a function value.  */
4011 ix86_function_value_regno_p (int regno)
4014 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
4015 || (regno == FIRST_SSE_REG && TARGET_SSE))
4019 && (regno == FIRST_MMX_REG && TARGET_MMX))
4025 /* Define how to find the value returned by a function.
4026 VALTYPE is the data type of the value (as a tree).
4027 If the precise function being called is known, FUNC is its FUNCTION_DECL;
4028 otherwise, FUNC is 0. */
4030 ix86_function_value (tree valtype, tree fntype_or_decl,
4031 bool outgoing ATTRIBUTE_UNUSED)
4033 enum machine_mode natmode = type_natural_mode (valtype);
4037 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
4038 1, REGPARM_MAX, SSE_REGPARM_MAX,
4039 x86_64_int_return_registers, 0);
4040       /* For zero sized structures, construct_container returns NULL, but we
4041 	 need to keep the rest of the compiler happy by returning a meaningful value.  */
4043 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
4048 tree fn = NULL_TREE, fntype;
4050 && DECL_P (fntype_or_decl))
4051 fn = fntype_or_decl;
4052 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
4053 return gen_rtx_REG (TYPE_MODE (valtype),
4054 ix86_value_regno (natmode, fn, fntype));
4058 /* Return true iff type is returned in memory. */
4060 ix86_return_in_memory (tree type)
4062 int needed_intregs, needed_sseregs, size;
4063 enum machine_mode mode = type_natural_mode (type);
4066 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
4068 if (mode == BLKmode)
4071 size = int_size_in_bytes (type);
4073 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
4076 if (VECTOR_MODE_P (mode) || mode == TImode)
4078 /* User-created vectors small enough to fit in EAX. */
4082       /* MMX/3dNow values are returned in MM0,
4083 	 except when it doesn't exist.  */
4085 return (TARGET_MMX ? 0 : 1);
4087 /* SSE values are returned in XMM0, except when it doesn't exist. */
4089 return (TARGET_SSE ? 0 : 1);
4103 /* When returning SSE vector types, we have a choice of either
4104      (1) being ABI incompatible with a -march switch, or
4105 (2) generating an error.
4106 Given no good solution, I think the safest thing is one warning.
4107 The user won't be able to use -Werror, but....
4109 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
4110 called in response to actually generating a caller or callee that
4111 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
4112 via aggregate_value_p for general type probing from tree-ssa. */
4115 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
4117 static bool warnedsse, warnedmmx;
4121 /* Look at the return type of the function, not the function type. */
4122 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
4124 if (!TARGET_SSE && !warnedsse)
4127 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4130 warning (0, "SSE vector return without SSE enabled "
4135 if (!TARGET_MMX && !warnedmmx)
4137 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4140 warning (0, "MMX vector return without MMX enabled "
4149 /* Define how to find the value returned by a library function
4150 assuming the value has mode MODE. */
4152 ix86_libcall_value (enum machine_mode mode)
4166 return gen_rtx_REG (mode, FIRST_SSE_REG);
4169 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
4173 return gen_rtx_REG (mode, 0);
4177 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
4180 /* Given a mode, return the register to use for a return value. */
4183 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
4185 gcc_assert (!TARGET_64BIT);
4187 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
4188 we normally prevent this case when mmx is not available. However
4189 some ABIs may require the result to be returned like DImode. */
4190 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4191 return TARGET_MMX ? FIRST_MMX_REG : 0;
4193 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
4194 we prevent this case when sse is not available. However some ABIs
4195 may require the result to be returned like integer TImode. */
4196 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4197 return TARGET_SSE ? FIRST_SSE_REG : 0;
4199 /* Decimal floating point values can go in %eax, unlike other float modes. */
4200 if (DECIMAL_FLOAT_MODE_P (mode))
4203 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
4204 if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
4207 /* Floating point return values in %st(0), except for local functions when
4208 SSE math is enabled or for functions with sseregparm attribute. */
4209 if ((func || fntype)
4210 && (mode == SFmode || mode == DFmode))
4212 int sse_level = ix86_function_sseregparm (fntype, func);
4213 if ((sse_level >= 1 && mode == SFmode)
4214 || (sse_level == 2 && mode == DFmode))
4215 return FIRST_SSE_REG;
4218 return FIRST_FLOAT_REG;
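/* For example (illustrative only): by default an SFmode return value
   goes in %st(0), but for a function whose sseregparm level, as
   computed above, is at least 1, the same value is returned in %xmm0
   instead.  */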
4221 /* Create the va_list data type. */
4224 ix86_build_builtin_va_list (void)
4226 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
4228   /* For i386 we use a plain pointer to the argument area.  */
4230 return build_pointer_type (char_type_node);
4232 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4233 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
4235 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
4236 unsigned_type_node);
4237 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
4238 unsigned_type_node);
4239 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
4241 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
4244 va_list_gpr_counter_field = f_gpr;
4245 va_list_fpr_counter_field = f_fpr;
4247 DECL_FIELD_CONTEXT (f_gpr) = record;
4248 DECL_FIELD_CONTEXT (f_fpr) = record;
4249 DECL_FIELD_CONTEXT (f_ovf) = record;
4250 DECL_FIELD_CONTEXT (f_sav) = record;
4252 TREE_CHAIN (record) = type_decl;
4253 TYPE_NAME (record) = type_decl;
4254 TYPE_FIELDS (record) = f_gpr;
4255 TREE_CHAIN (f_gpr) = f_fpr;
4256 TREE_CHAIN (f_fpr) = f_ovf;
4257 TREE_CHAIN (f_ovf) = f_sav;
4259 layout_type (record);
4261 /* The correct type is an array type of one element. */
4262 return build_array_type (record, build_index_type (size_zero_node));
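/* For reference, the record built above corresponds roughly to the
   following C declaration (a sketch matching the x86-64 psABI va_list
   layout):

     typedef struct __va_list_tag
     {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag;

     typedef __va_list_tag va_list[1];
*/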
4265 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
4268 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4269 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4272 CUMULATIVE_ARGS next_cum;
4273 rtx save_area = NULL_RTX, mem;
4286 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
4289 /* Indicate to allocate space on the stack for varargs save area. */
4290 ix86_save_varrargs_registers = 1;
4292 cfun->stack_alignment_needed = 128;
4294 fntype = TREE_TYPE (current_function_decl);
4295 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
4296 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
4297 != void_type_node));
4299 /* For varargs, we do not want to skip the dummy va_dcl argument.
4300 For stdargs, we do want to skip the last named argument. */
4303 function_arg_advance (&next_cum, mode, type, 1);
4306 save_area = frame_pointer_rtx;
4308 set = get_varargs_alias_set ();
4310 for (i = next_cum.regno;
4312 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
4315 mem = gen_rtx_MEM (Pmode,
4316 plus_constant (save_area, i * UNITS_PER_WORD));
4317 MEM_NOTRAP_P (mem) = 1;
4318 set_mem_alias_set (mem, set);
4319 emit_move_insn (mem, gen_rtx_REG (Pmode,
4320 x86_64_int_parameter_registers[i]));
4323 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
4325     /* Now emit code to save SSE registers.  The AX parameter contains the
4326        number of SSE parameter registers used to call this function.  We use
4327        the sse_prologue_save insn template, which produces a computed jump
4328        across the SSE saves.  We need some preparation work to get this working.  */
4330 label = gen_label_rtx ();
4331 label_ref = gen_rtx_LABEL_REF (Pmode, label);
4333     /* Compute the address to jump to:
4334 label - 5*eax + nnamed_sse_arguments*5 */
4335 tmp_reg = gen_reg_rtx (Pmode);
4336 nsse_reg = gen_reg_rtx (Pmode);
4337 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
4338 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4339 gen_rtx_MULT (Pmode, nsse_reg,
4341 if (next_cum.sse_regno)
4344 gen_rtx_CONST (DImode,
4345 gen_rtx_PLUS (DImode,
4347 GEN_INT (next_cum.sse_regno * 4))));
4349 emit_move_insn (nsse_reg, label_ref);
4350 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
4352     /* Compute the address of the memory block we save into.  We always use
4353        a pointer pointing 127 bytes after the first byte to store; this keeps
4354        each displacement within a signed byte and the instruction size to 4 bytes.  */
4355 tmp_reg = gen_reg_rtx (Pmode);
4356 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4357 plus_constant (save_area,
4358 8 * REGPARM_MAX + 127)));
4359 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
4360 MEM_NOTRAP_P (mem) = 1;
4361 set_mem_alias_set (mem, set);
4362 set_mem_align (mem, BITS_PER_WORD);
4364 /* And finally do the dirty job! */
4365 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
4366 GEN_INT (next_cum.sse_regno), label));
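      /* The net effect (a sketch): sse_prologue_save expands into a run
	 of equally sized SSE store instructions preceded by the computed
	 jump prepared above, so at run time only the stores for SSE
	 argument registers that may actually hold variadic arguments are
	 executed.  */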
4371 /* Implement va_start. */
4374 ix86_va_start (tree valist, rtx nextarg)
4376 HOST_WIDE_INT words, n_gpr, n_fpr;
4377 tree f_gpr, f_fpr, f_ovf, f_sav;
4378 tree gpr, fpr, ovf, sav, t;
4381 /* Only 64bit target needs something special. */
4384 std_expand_builtin_va_start (valist, nextarg);
4388 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4389 f_fpr = TREE_CHAIN (f_gpr);
4390 f_ovf = TREE_CHAIN (f_fpr);
4391 f_sav = TREE_CHAIN (f_ovf);
4393 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
4394 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4395 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4396 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4397 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4399 /* Count number of gp and fp argument registers used. */
4400 words = current_function_args_info.words;
4401 n_gpr = current_function_args_info.regno;
4402 n_fpr = current_function_args_info.sse_regno;
4404 if (TARGET_DEBUG_ARG)
4405 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
4406 (int) words, (int) n_gpr, (int) n_fpr);
4408 if (cfun->va_list_gpr_size)
4410 type = TREE_TYPE (gpr);
4411 t = build2 (GIMPLE_MODIFY_STMT, type, gpr,
4412 build_int_cst (type, n_gpr * 8));
4413 TREE_SIDE_EFFECTS (t) = 1;
4414 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4417 if (cfun->va_list_fpr_size)
4419 type = TREE_TYPE (fpr);
4420 t = build2 (GIMPLE_MODIFY_STMT, type, fpr,
4421 build_int_cst (type, n_fpr * 16 + 8*REGPARM_MAX));
4422 TREE_SIDE_EFFECTS (t) = 1;
4423 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4426 /* Find the overflow area. */
4427 type = TREE_TYPE (ovf);
4428 t = make_tree (type, virtual_incoming_args_rtx);
4430 t = build2 (PLUS_EXPR, type, t,
4431 build_int_cst (type, words * UNITS_PER_WORD));
4432 t = build2 (GIMPLE_MODIFY_STMT, type, ovf, t);
4433 TREE_SIDE_EFFECTS (t) = 1;
4434 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4436 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
4438       /* Find the register save area.
4439 	 The function prologue saves it right above the stack frame.  */
4440 type = TREE_TYPE (sav);
4441 t = make_tree (type, frame_pointer_rtx);
4442 t = build2 (GIMPLE_MODIFY_STMT, type, sav, t);
4443 TREE_SIDE_EFFECTS (t) = 1;
4444 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
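/* After va_start, the register save area set up by the prologue is laid
   out as follows (a sketch matching the offsets used above):

     offset 0               general-purpose argument registers,
			    8 bytes each (8 * REGPARM_MAX bytes total)
     offset 8*REGPARM_MAX   SSE argument registers, 16 bytes each

   gp_offset and fp_offset index into this area just past the registers
   already consumed by named arguments.  */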
4448 /* Implement va_arg. */
4451 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4453 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
4454 tree f_gpr, f_fpr, f_ovf, f_sav;
4455 tree gpr, fpr, ovf, sav, t;
4457 tree lab_false, lab_over = NULL_TREE;
4462 enum machine_mode nat_mode;
4464 /* Only 64bit target needs something special. */
4466 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4468 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4469 f_fpr = TREE_CHAIN (f_gpr);
4470 f_ovf = TREE_CHAIN (f_fpr);
4471 f_sav = TREE_CHAIN (f_ovf);
4473 valist = build_va_arg_indirect_ref (valist);
4474 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4475 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4476 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4477 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4479 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
4481 type = build_pointer_type (type);
4482 size = int_size_in_bytes (type);
4483 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4485 nat_mode = type_natural_mode (type);
4486 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
4487 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
4489 /* Pull the value out of the saved registers. */
4491 addr = create_tmp_var (ptr_type_node, "addr");
4492 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
4496 int needed_intregs, needed_sseregs;
4498 tree int_addr, sse_addr;
4500 lab_false = create_artificial_label ();
4501 lab_over = create_artificial_label ();
4503 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
4505 need_temp = (!REG_P (container)
4506 && ((needed_intregs && TYPE_ALIGN (type) > 64)
4507 || TYPE_ALIGN (type) > 128));
4509       /* In case we are passing a structure, verify that it is a consecutive
4510 	 block on the register save area.  If not, we need to do moves.  */
4511 if (!need_temp && !REG_P (container))
4513 	  /* Verify that all registers are strictly consecutive.  */
4514 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
4518 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4520 rtx slot = XVECEXP (container, 0, i);
4521 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
4522 || INTVAL (XEXP (slot, 1)) != i * 16)
4530 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4532 rtx slot = XVECEXP (container, 0, i);
4533 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
4534 || INTVAL (XEXP (slot, 1)) != i * 8)
4546 int_addr = create_tmp_var (ptr_type_node, "int_addr");
4547 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
4548 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
4549 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
4552 /* First ensure that we fit completely in registers. */
4555 t = build_int_cst (TREE_TYPE (gpr),
4556 (REGPARM_MAX - needed_intregs + 1) * 8);
4557 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
4558 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4559 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4560 gimplify_and_add (t, pre_p);
4564 t = build_int_cst (TREE_TYPE (fpr),
4565 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
4567 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
4568 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4569 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4570 gimplify_and_add (t, pre_p);
4573 /* Compute index to start of area used for integer regs. */
4576 /* int_addr = gpr + sav; */
4577 t = fold_convert (ptr_type_node, gpr);
4578 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4579 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, int_addr, t);
4580 gimplify_and_add (t, pre_p);
4584 /* sse_addr = fpr + sav; */
4585 t = fold_convert (ptr_type_node, fpr);
4586 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4587 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, sse_addr, t);
4588 gimplify_and_add (t, pre_p);
4593 tree temp = create_tmp_var (type, "va_arg_tmp");
4596 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4597 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4598 gimplify_and_add (t, pre_p);
4600 for (i = 0; i < XVECLEN (container, 0); i++)
4602 rtx slot = XVECEXP (container, 0, i);
4603 rtx reg = XEXP (slot, 0);
4604 enum machine_mode mode = GET_MODE (reg);
4605 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4606 tree addr_type = build_pointer_type (piece_type);
4609 tree dest_addr, dest;
4611 if (SSE_REGNO_P (REGNO (reg)))
4613 src_addr = sse_addr;
4614 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4618 src_addr = int_addr;
4619 src_offset = REGNO (reg) * 8;
4621 src_addr = fold_convert (addr_type, src_addr);
4622 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4623 size_int (src_offset)));
4624 src = build_va_arg_indirect_ref (src_addr);
4626 dest_addr = fold_convert (addr_type, addr);
4627 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4628 size_int (INTVAL (XEXP (slot, 1)))));
4629 dest = build_va_arg_indirect_ref (dest_addr);
4631 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
4632 gimplify_and_add (t, pre_p);
4638 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4639 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4640 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr, t);
4641 gimplify_and_add (t, pre_p);
4645 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4646 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4647 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr, t);
4648 gimplify_and_add (t, pre_p);
4651 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4652 gimplify_and_add (t, pre_p);
4654 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4655 append_to_statement_list (t, pre_p);
4658 /* ... otherwise out of the overflow area. */
4660 /* Care for on-stack alignment if needed. */
4661 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
4662 || integer_zerop (TYPE_SIZE (type)))
4666 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4667 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4668 build_int_cst (TREE_TYPE (ovf), align - 1));
4669 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4670 build_int_cst (TREE_TYPE (t), -align));
4672 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4674 t2 = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4675 gimplify_and_add (t2, pre_p);
4677 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4678 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4679 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
4680 gimplify_and_add (t, pre_p);
4684 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4685 append_to_statement_list (t, pre_p);
4688 ptrtype = build_pointer_type (type);
4689 addr = fold_convert (ptrtype, addr);
4692 addr = build_va_arg_indirect_ref (addr);
4693 return build_va_arg_indirect_ref (addr);
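/* The GIMPLE built above has roughly this shape (a sketch):

     if (gp_offset >= gp_limit || fp_offset >= fp_limit)
       goto lab_false;
     addr = reg_save_area + offset;	// fetch from saved registers
     gp_offset += 8 * needed_intregs;
     fp_offset += 16 * needed_sseregs;
     goto lab_over;
   lab_false:
     addr = align (overflow_arg_area);	// fetch from the stack
     overflow_arg_area = addr + rsize * UNITS_PER_WORD;
   lab_over:
     result = *(type *) addr;
*/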
4696 /* Return nonzero if OPNUM's MEM should be matched
4697 in movabs* patterns. */
4700 ix86_check_movabs (rtx insn, int opnum)
4704 set = PATTERN (insn);
4705 if (GET_CODE (set) == PARALLEL)
4706 set = XVECEXP (set, 0, 0);
4707 gcc_assert (GET_CODE (set) == SET);
4708 mem = XEXP (set, opnum);
4709 while (GET_CODE (mem) == SUBREG)
4710 mem = SUBREG_REG (mem);
4711 gcc_assert (GET_CODE (mem) == MEM);
4712 return (volatile_ok || !MEM_VOLATILE_P (mem));
4715 /* Initialize the table of extra 80387 mathematical constants. */
4718 init_ext_80387_constants (void)
4720 static const char * cst[5] =
4722 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4723 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4724 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4725 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4726 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4730 for (i = 0; i < 5; i++)
4732 real_from_string (&ext_80387_constants_table[i], cst[i]);
4733 /* Ensure each constant is rounded to XFmode precision. */
4734 real_convert (&ext_80387_constants_table[i],
4735 XFmode, &ext_80387_constants_table[i]);
4738 ext_80387_constants_init = 1;
4741 /* Return true if the constant is something that can be loaded with
4742 a special instruction. */
4745 standard_80387_constant_p (rtx x)
4749 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4752 if (x == CONST0_RTX (GET_MODE (x)))
4754 if (x == CONST1_RTX (GET_MODE (x)))
4757 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4759 /* For XFmode constants, try to find a special 80387 instruction when
4760 optimizing for size or on those CPUs that benefit from them. */
4761 if (GET_MODE (x) == XFmode
4762 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4766 if (! ext_80387_constants_init)
4767 init_ext_80387_constants ();
4769 for (i = 0; i < 5; i++)
4770 if (real_identical (&r, &ext_80387_constants_table[i]))
4774   /* A load of the constant -0.0 or -1.0 will be split into an
4775      fldz;fchs or fld1;fchs sequence.  */
4776 if (real_isnegzero (&r))
4778 if (real_identical (&r, &dconstm1))
4784 /* Return the opcode of the special instruction to be used to load
4785    the constant X.  */
4788 standard_80387_constant_opcode (rtx x)
4790 switch (standard_80387_constant_p (x))
4814 /* Return the CONST_DOUBLE representing the 80387 constant that is
4815 loaded by the specified special instruction. The argument IDX
4816 matches the return value from standard_80387_constant_p. */
4819 standard_80387_constant_rtx (int idx)
4823 if (! ext_80387_constants_init)
4824 init_ext_80387_constants ();
4840 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4844 /* Return 1 if MODE is a valid mode for SSE.  */
4846 standard_sse_mode_p (enum machine_mode mode)
4863 /* Return 1 if X is an FP constant we can load into an SSE register
4864    without using memory.  */
4866 standard_sse_constant_p (rtx x)
4868 enum machine_mode mode = GET_MODE (x);
4870 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
4872 if (vector_all_ones_operand (x, mode)
4873 && standard_sse_mode_p (mode))
4874 return TARGET_SSE2 ? 2 : -1;
4879 /* Return the opcode of the special instruction to be used to load
4880    the constant X.  */
4883 standard_sse_constant_opcode (rtx insn, rtx x)
4885 switch (standard_sse_constant_p (x))
4888 if (get_attr_mode (insn) == MODE_V4SF)
4889 return "xorps\t%0, %0";
4890 else if (get_attr_mode (insn) == MODE_V2DF)
4891 return "xorpd\t%0, %0";
4893 return "pxor\t%0, %0";
4895 return "pcmpeqd\t%0, %0";
4900 /* Returns 1 if OP contains a symbol reference.  */
4903 symbolic_reference_mentioned_p (rtx op)
4908 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4911 fmt = GET_RTX_FORMAT (GET_CODE (op));
4912 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4918 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4919 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4923 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4930 /* Return 1 if it is appropriate to emit `ret' instructions in the
4931 body of a function. Do this only if the epilogue is simple, needing a
4932 couple of insns. Prior to reloading, we can't tell how many registers
4933 must be saved, so return 0 then. Return 0 if there is no frame
4934 marker to de-allocate. */
4937 ix86_can_use_return_insn_p (void)
4939 struct ix86_frame frame;
4941 if (! reload_completed || frame_pointer_needed)
4944   /* Don't allow more than 32768 bytes of popped arguments, since that's
4945      all we can do with one `ret' instruction.  */
4946 if (current_function_pops_args
4947 && current_function_args_size >= 32768)
4950 ix86_compute_frame_layout (&frame);
4951 return frame.to_allocate == 0 && frame.nregs == 0;
4954 /* Value should be nonzero if functions must have frame pointers.
4955 Zero means the frame pointer need not be set up (and parms may
4956 be accessed via the stack pointer) in functions that seem suitable. */
4959 ix86_frame_pointer_required (void)
4961 /* If we accessed previous frames, then the generated code expects
4962 to be able to access the saved ebp value in our frame. */
4963 if (cfun->machine->accesses_prev_frame)
4966   /* Several x86 OSes need a frame pointer for other reasons,
4967      usually pertaining to setjmp.  */
4968 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4971 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4972 the frame pointer by default. Turn it back on now if we've not
4973 got a leaf function. */
4974 if (TARGET_OMIT_LEAF_FRAME_POINTER
4975 && (!current_function_is_leaf
4976 || ix86_current_function_calls_tls_descriptor))
4979 if (current_function_profile)
4985 /* Record that the current function accesses previous call frames. */
4988 ix86_setup_frame_addresses (void)
4990 cfun->machine->accesses_prev_frame = 1;
4993 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
4994 # define USE_HIDDEN_LINKONCE 1
4996 # define USE_HIDDEN_LINKONCE 0
4999 static int pic_labels_used;
5001 /* Fills in the label name that should be used for a pc thunk for
5002 the given register. */
5005 get_pc_thunk_name (char name[32], unsigned int regno)
5007 gcc_assert (!TARGET_64BIT);
5009 if (USE_HIDDEN_LINKONCE)
5010 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
5012 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
5016 /* This function generates code for -fpic that loads %ebx with
5017 the return address of the caller and then returns. */
5020 ix86_file_end (void)
5025 for (regno = 0; regno < 8; ++regno)
5029 if (! ((pic_labels_used >> regno) & 1))
5032 get_pc_thunk_name (name, regno);
5037 switch_to_section (darwin_sections[text_coal_section]);
5038 fputs ("\t.weak_definition\t", asm_out_file);
5039 assemble_name (asm_out_file, name);
5040 fputs ("\n\t.private_extern\t", asm_out_file);
5041 assemble_name (asm_out_file, name);
5042 fputs ("\n", asm_out_file);
5043 ASM_OUTPUT_LABEL (asm_out_file, name);
5047 if (USE_HIDDEN_LINKONCE)
5051 decl = build_decl (FUNCTION_DECL, get_identifier (name),
5053 TREE_PUBLIC (decl) = 1;
5054 TREE_STATIC (decl) = 1;
5055 DECL_ONE_ONLY (decl) = 1;
5057 (*targetm.asm_out.unique_section) (decl, 0);
5058 switch_to_section (get_named_section (decl, NULL, 0));
5060 (*targetm.asm_out.globalize_label) (asm_out_file, name);
5061 fputs ("\t.hidden\t", asm_out_file);
5062 assemble_name (asm_out_file, name);
5063 fputc ('\n', asm_out_file);
5064 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
5068 switch_to_section (text_section);
5069 ASM_OUTPUT_LABEL (asm_out_file, name);
5072 xops[0] = gen_rtx_REG (SImode, regno);
5073 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
5074 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
5075 output_asm_insn ("ret", xops);
5078 if (NEED_INDICATE_EXEC_STACK)
5079 file_end_indicate_exec_stack ();
5082 /* Emit code for the SET_GOT patterns. */
5085 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
5090 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
5092 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
5094 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
5097 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5099 output_asm_insn ("call\t%a2", xops);
5102 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5103 is what will be referenced by the Mach-O PIC subsystem. */
5105 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5108 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5109 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
5112 output_asm_insn ("pop{l}\t%0", xops);
5117 get_pc_thunk_name (name, REGNO (dest));
5118 pic_labels_used |= 1 << REGNO (dest);
5120 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5121 xops[2] = gen_rtx_MEM (QImode, xops[2]);
5122 output_asm_insn ("call\t%X2", xops);
5123 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5124 is what will be referenced by the Mach-O PIC subsystem. */
5127 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5129 targetm.asm_out.internal_label (asm_out_file, "L",
5130 CODE_LABEL_NUMBER (label));
5137 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
5138 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
5140 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
5145 /* Generate a "push" pattern for input ARG.  */
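/* The RTL built below corresponds to a single push instruction: the
   stack pointer is pre-decremented and ARG is stored at the resulting
   address.  */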
5150 return gen_rtx_SET (VOIDmode,
5152 gen_rtx_PRE_DEC (Pmode,
5153 stack_pointer_rtx)),
5157 /* Return >= 0 if there is an unused call-clobbered register available
5158 for the entire function. */
5161 ix86_select_alt_pic_regnum (void)
5163 if (current_function_is_leaf && !current_function_profile
5164 && !ix86_current_function_calls_tls_descriptor)
5167 for (i = 2; i >= 0; --i)
5168 if (!regs_ever_live[i])
5172 return INVALID_REGNUM;
5175 /* Return 1 if we need to save REGNO. */
5177 ix86_save_reg (unsigned int regno, int maybe_eh_return)
5179 if (pic_offset_table_rtx
5180 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
5181 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5182 || current_function_profile
5183 || current_function_calls_eh_return
5184 || current_function_uses_const_pool))
5186 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
5191 if (current_function_calls_eh_return && maybe_eh_return)
5196 unsigned test = EH_RETURN_DATA_REGNO (i);
5197 if (test == INVALID_REGNUM)
5204 if (cfun->machine->force_align_arg_pointer
5205 && regno == REGNO (cfun->machine->force_align_arg_pointer))
5208 return (regs_ever_live[regno]
5209 && !call_used_regs[regno]
5210 && !fixed_regs[regno]
5211 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
5214 /* Return the number of registers to be saved on the stack.  */
5217 ix86_nsaved_regs (void)
5222 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
5223 if (ix86_save_reg (regno, true))
5228 /* Return the offset between two registers, one to be eliminated, and the other
5229 its replacement, at the start of a routine. */
5232 ix86_initial_elimination_offset (int from, int to)
5234 struct ix86_frame frame;
5235 ix86_compute_frame_layout (&frame);
5237 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
5238 return frame.hard_frame_pointer_offset;
5239 else if (from == FRAME_POINTER_REGNUM
5240 && to == HARD_FRAME_POINTER_REGNUM)
5241 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
5244 gcc_assert (to == STACK_POINTER_REGNUM);
5246 if (from == ARG_POINTER_REGNUM)
5247 return frame.stack_pointer_offset;
5249 gcc_assert (from == FRAME_POINTER_REGNUM);
5250 return frame.stack_pointer_offset - frame.frame_pointer_offset;
5254 /* Fill the ix86_frame structure describing the frame of the currently computed function.  */
5257 ix86_compute_frame_layout (struct ix86_frame *frame)
5259 HOST_WIDE_INT total_size;
5260 unsigned int stack_alignment_needed;
5261 HOST_WIDE_INT offset;
5262 unsigned int preferred_alignment;
5263 HOST_WIDE_INT size = get_frame_size ();
5265 frame->nregs = ix86_nsaved_regs ();
5268 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
5269 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
5271   /* During reload iteration the number of saved registers can change.
5272      Recompute the value as needed.  Do not recompute when the number of
5273      registers didn't change, as reload makes multiple calls to this function
5274      and does not expect the decision to change within a single iteration.  */
5276 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
5278 int count = frame->nregs;
5280 cfun->machine->use_fast_prologue_epilogue_nregs = count;
5281       /* The fast prologue uses moves instead of pushes to save registers.  This
5282 	 is significantly longer, but also executes faster, as modern hardware
5283 	 can execute the moves in parallel, but can't do that for push/pop.

5285 	 Be careful about choosing which prologue to emit:  when the function
5286 	 takes many instructions to execute, we may use the slow version, as
5287 	 well as when the function is known to be outside a hot spot (this
5288 	 is known with feedback only).  Weight the size of the function by the
5289 	 number of registers to save, as it is cheap to use one or two push
5290 	 instructions but very slow to use many of them.  */
5292 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
5293 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
5294 || (flag_branch_probabilities
5295 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
5296 cfun->machine->use_fast_prologue_epilogue = false;
5298 cfun->machine->use_fast_prologue_epilogue
5299 = !expensive_function_p (count);
5301 if (TARGET_PROLOGUE_USING_MOVE
5302 && cfun->machine->use_fast_prologue_epilogue)
5303 frame->save_regs_using_mov = true;
5305 frame->save_regs_using_mov = false;
5308 /* Skip return address and saved base pointer. */
5309 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
5311 frame->hard_frame_pointer_offset = offset;
5313   /* Do some sanity checking of stack_alignment_needed and
5314      preferred_alignment, since the i386 port is the only one using these
5315      features, and they may break easily.  */
5317 gcc_assert (!size || stack_alignment_needed);
5318 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
5319 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5320 gcc_assert (stack_alignment_needed
5321 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5323 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
5324 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
5326 /* Register save area */
5327 offset += frame->nregs * UNITS_PER_WORD;
5330 if (ix86_save_varrargs_registers)
5332 offset += X86_64_VARARGS_SIZE;
5333 frame->va_arg_size = X86_64_VARARGS_SIZE;
5336 frame->va_arg_size = 0;
5338 /* Align start of frame for local function. */
5339 frame->padding1 = ((offset + stack_alignment_needed - 1)
5340 & -stack_alignment_needed) - offset;
5342 offset += frame->padding1;
5344 /* Frame pointer points here. */
5345 frame->frame_pointer_offset = offset;
5349   /* Add the outgoing arguments area.  This can be skipped if we eliminated
5350      all the function calls as dead code.
5351      Skipping is however impossible when the function calls alloca:  the
5352      alloca expander assumes that the last current_function_outgoing_args_size
5353      bytes of the stack frame are unused.  */
5354 if (ACCUMULATE_OUTGOING_ARGS
5355 && (!current_function_is_leaf || current_function_calls_alloca
5356 || ix86_current_function_calls_tls_descriptor))
5358 offset += current_function_outgoing_args_size;
5359 frame->outgoing_arguments_size = current_function_outgoing_args_size;
5362 frame->outgoing_arguments_size = 0;
5364   /* Align the stack boundary.  Only needed if we're calling another
5365      function or using alloca.  */
5366 if (!current_function_is_leaf || current_function_calls_alloca
5367 || ix86_current_function_calls_tls_descriptor)
5368 frame->padding2 = ((offset + preferred_alignment - 1)
5369 & -preferred_alignment) - offset;
5371 frame->padding2 = 0;
5373 offset += frame->padding2;
5375 /* We've reached end of stack frame. */
5376 frame->stack_pointer_offset = offset;
5378 /* Size prologue needs to allocate. */
5379 frame->to_allocate =
5380 (size + frame->padding1 + frame->padding2
5381 + frame->outgoing_arguments_size + frame->va_arg_size);
5383 if ((!frame->to_allocate && frame->nregs <= 1)
5384 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
5385 frame->save_regs_using_mov = false;
5387 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
5388 && current_function_is_leaf
5389 && !ix86_current_function_calls_tls_descriptor)
5391 frame->red_zone_size = frame->to_allocate;
5392 if (frame->save_regs_using_mov)
5393 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
5394 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
5395 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
5398 frame->red_zone_size = 0;
5399 frame->to_allocate -= frame->red_zone_size;
5400 frame->stack_pointer_offset -= frame->red_zone_size;
5402 fprintf (stderr, "nregs: %i\n", frame->nregs);
5403 fprintf (stderr, "size: %i\n", size);
5404 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
5405 fprintf (stderr, "padding1: %i\n", frame->padding1);
5406 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
5407 fprintf (stderr, "padding2: %i\n", frame->padding2);
5408 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
5409 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
5410 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
5411 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
5412 frame->hard_frame_pointer_offset);
5413 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
5417 /* Emit code to save registers in the prologue. */
5420 ix86_emit_save_regs (void)
5425 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
5426 if (ix86_save_reg (regno, true))
5428 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
5429 RTX_FRAME_RELATED_P (insn) = 1;
5433 /* Emit code to save registers using MOV insns.  The first register
5434    is stored at POINTER + OFFSET.  */
5436 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
5441 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5442 if (ix86_save_reg (regno, true))
5444 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
5446 gen_rtx_REG (Pmode, regno));
5447 RTX_FRAME_RELATED_P (insn) = 1;
5448 offset += UNITS_PER_WORD;
5452 /* Expand prologue or epilogue stack adjustment.
5453    The pattern exists to put a dependency on all ebp-based memory accesses.
5454    STYLE should be negative if instructions should be marked as frame
5455    related, zero if the %r11 register is live and cannot be freely used,
5456    and positive otherwise.  */
5459 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
5464 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
5465 else if (x86_64_immediate_operand (offset, DImode))
5466 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
5470 /* r11 is used by indirect sibcall return as well, set before the
5471 epilogue and used after the epilogue. ATM indirect sibcall
5472 shouldn't be used together with huge frame sizes in one
5473 function because of the frame_size check in sibcall.c. */
5475 r11 = gen_rtx_REG (DImode, R11_REG);
5476 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
5478 RTX_FRAME_RELATED_P (insn) = 1;
5479 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
5483 RTX_FRAME_RELATED_P (insn) = 1;
5486 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
5489 ix86_internal_arg_pointer (void)
5491 bool has_force_align_arg_pointer =
5492 (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
5493 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
5494 if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
5495 && DECL_NAME (current_function_decl)
5496 && MAIN_NAME_P (DECL_NAME (current_function_decl))
5497 && DECL_FILE_SCOPE_P (current_function_decl))
5498 || ix86_force_align_arg_pointer
5499 || has_force_align_arg_pointer)
5501 	/* Nested functions can't realign the stack due to a register
5502 	   conflict.  */
5503 if (DECL_CONTEXT (current_function_decl)
5504 && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
5506 if (ix86_force_align_arg_pointer)
5507 warning (0, "-mstackrealign ignored for nested functions");
5508 if (has_force_align_arg_pointer)
5509 error ("%s not supported for nested functions",
5510 ix86_force_align_arg_pointer_string);
5511 return virtual_incoming_args_rtx;
5513 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
5514 return copy_to_reg (cfun->machine->force_align_arg_pointer);
5517 return virtual_incoming_args_rtx;
5520 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
5521 This is called from dwarf2out.c to emit call frame instructions
5522 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
5524 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
5526 rtx unspec = SET_SRC (pattern);
5527 gcc_assert (GET_CODE (unspec) == UNSPEC);
5531 case UNSPEC_REG_SAVE:
5532 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
5533 SET_DEST (pattern));
5535 case UNSPEC_DEF_CFA:
5536 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
5537 INTVAL (XVECEXP (unspec, 0, 0)));
5544 /* Expand the prologue into a bunch of separate insns. */
5547 ix86_expand_prologue (void)
5551 struct ix86_frame frame;
5552 HOST_WIDE_INT allocate;
5554 ix86_compute_frame_layout (&frame);
5556 if (cfun->machine->force_align_arg_pointer)
5560 /* Grab the argument pointer. */
5561 x = plus_constant (stack_pointer_rtx, 4);
5562 y = cfun->machine->force_align_arg_pointer;
5563 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
5564 RTX_FRAME_RELATED_P (insn) = 1;
5566       /* The unwind info consists of two parts: install the fafp as the cfa,
5567 	 and record the fafp as the "save register" of the stack pointer.
5568 	 The latter is there so that the unwinder can see where it should
5569 	 restore the stack pointer across the and insn.  */
5570 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
5571 x = gen_rtx_SET (VOIDmode, y, x);
5572 RTX_FRAME_RELATED_P (x) = 1;
5573 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
5575 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
5576 RTX_FRAME_RELATED_P (y) = 1;
5577 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
5578 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5579 REG_NOTES (insn) = x;
5581 /* Align the stack. */
5582 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
5585 /* And here we cheat like madmen with the unwind info. We force the
5586 cfa register back to sp+4, which is exactly what it was at the
5587 start of the function. Re-pushing the return address results in
5588 the return at the same spot relative to the cfa, and thus is
5589 correct wrt the unwind info. */
5590 x = cfun->machine->force_align_arg_pointer;
5591 x = gen_frame_mem (Pmode, plus_constant (x, -4));
5592 insn = emit_insn (gen_push (x));
5593 RTX_FRAME_RELATED_P (insn) = 1;
5596 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
5597 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
5598 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5599 REG_NOTES (insn) = x;
5602 /* Note: AT&T enter does NOT have reversed args. Enter is probably
5603 slower on all targets. Also sdb doesn't like it. */
5605 if (frame_pointer_needed)
5607 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
5608 RTX_FRAME_RELATED_P (insn) = 1;
5610 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
5611 RTX_FRAME_RELATED_P (insn) = 1;
5614 allocate = frame.to_allocate;
5616 if (!frame.save_regs_using_mov)
5617 ix86_emit_save_regs ();
5619 allocate += frame.nregs * UNITS_PER_WORD;
5621   /* When using the red zone we may start saving registers before allocating
5622      the stack frame, saving one cycle of the prologue.  */
5623 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
5624 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
5625 : stack_pointer_rtx,
5626 -frame.nregs * UNITS_PER_WORD);
5630 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
5631 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5632 GEN_INT (-allocate), -1);
5635 /* Only valid for Win32. */
5636 rtx eax = gen_rtx_REG (SImode, 0);
5637 bool eax_live = ix86_eax_live_at_start_p ();
5640 gcc_assert (!TARGET_64BIT);
5644 emit_insn (gen_push (eax));
5648 emit_move_insn (eax, GEN_INT (allocate));
5650 insn = emit_insn (gen_allocate_stack_worker (eax));
5651 RTX_FRAME_RELATED_P (insn) = 1;
5652 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
5653 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
5654 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
5655 t, REG_NOTES (insn));
5659 if (frame_pointer_needed)
5660 t = plus_constant (hard_frame_pointer_rtx,
5663 - frame.nregs * UNITS_PER_WORD);
5665 t = plus_constant (stack_pointer_rtx, allocate);
5666 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
5670 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
5672 if (!frame_pointer_needed || !frame.to_allocate)
5673 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
5675 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
5676 -frame.nregs * UNITS_PER_WORD);
5679 pic_reg_used = false;
5680 if (pic_offset_table_rtx
5681 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5682 || current_function_profile))
5684 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
5686 if (alt_pic_reg_used != INVALID_REGNUM)
5687 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
5689 pic_reg_used = true;
5695 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
5697 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
5699 /* Even with accurate pre-reload life analysis, we can wind up
5700 deleting all references to the pic register after reload.
5701 Consider the case where cross-jumping unifies two sides of a branch
5702 controlled by a comparison vs. the only read from a global.
5703 In that case, allow the set_got to be deleted, though we're
5704 too late to do anything about the ebx save in the prologue. */
5705 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5708 /* Prevent function calls from being scheduled before the call to mcount.
5709 In the pic_reg_used case, make sure that the got load isn't deleted. */
5710 if (current_function_profile)
5711 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
5714 /* Emit code to restore saved registers using MOV insns. The first register
5715 is restored from POINTER + OFFSET. */
5717 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5718 int maybe_eh_return)
5721 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5723 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5724 if (ix86_save_reg (regno, maybe_eh_return))
5726 /* Ensure that adjust_address won't be forced to produce a pointer
5727 out of the range allowed by the x86-64 instruction set. */
5728 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5732 r11 = gen_rtx_REG (DImode, R11_REG);
5733 emit_move_insn (r11, GEN_INT (offset));
5734 emit_insn (gen_adddi3 (r11, r11, pointer));
5735 base_address = gen_rtx_MEM (Pmode, r11);
5738 emit_move_insn (gen_rtx_REG (Pmode, regno),
5739 adjust_address (base_address, Pmode, offset));
5740 offset += UNITS_PER_WORD;
5744 /* Restore function stack, frame, and registers. */
5747 ix86_expand_epilogue (int style)
5750 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5751 struct ix86_frame frame;
5752 HOST_WIDE_INT offset;
5754 ix86_compute_frame_layout (&frame);
5756 /* Calculate start of saved registers relative to ebp. Special care
5757 must be taken for the normal return case of a function using
5758 eh_return: the eax and edx registers are marked as saved, but not
5759 restored along this path. */
5760 offset = frame.nregs;
5761 if (current_function_calls_eh_return && style != 2)
5762 offset -= 2;
5763 offset *= -UNITS_PER_WORD;
5765 /* If we're only restoring one register and sp is not valid then
5766 use a move instruction to restore the register, since it's
5767 less work than reloading sp and popping the register.
5769 The default code results in a stack adjustment using an add/lea
5770 instruction, while this code results in a LEAVE instruction (or
5771 discrete equivalent), so it is profitable in some other cases as
5772 well, especially when there are no registers to restore. We also
5773 use this code when TARGET_USE_LEAVE and there is exactly one
5774 register to pop. This heuristic may need some tuning in the future. */
5775 if ((!sp_valid && frame.nregs <= 1)
5776 || (TARGET_EPILOGUE_USING_MOVE
5777 && cfun->machine->use_fast_prologue_epilogue
5778 && (frame.nregs > 1 || frame.to_allocate))
5779 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5780 || (frame_pointer_needed && TARGET_USE_LEAVE
5781 && cfun->machine->use_fast_prologue_epilogue
5782 && frame.nregs == 1)
5783 || current_function_calls_eh_return)
5785 /* Restore registers. We can use ebp or esp to address the memory
5786 locations. If both are available, default to ebp, since offsets
5787 are known to be small. The only exception is esp pointing directly to
5788 the end of the block of saved registers, where we may simplify the addressing mode. */
5791 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5792 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5793 frame.to_allocate, style == 2);
5795 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5796 offset, style == 2);
5798 /* eh_return epilogues need %ecx added to the stack pointer. */
5801 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5803 if (frame_pointer_needed)
5805 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5806 tmp = plus_constant (tmp, UNITS_PER_WORD);
5807 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5809 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5810 emit_move_insn (hard_frame_pointer_rtx, tmp);
5812 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5817 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5818 tmp = plus_constant (tmp, (frame.to_allocate
5819 + frame.nregs * UNITS_PER_WORD));
5820 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5823 else if (!frame_pointer_needed)
5824 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5825 GEN_INT (frame.to_allocate
5826 + frame.nregs * UNITS_PER_WORD),
5828 /* If not an i386, mov & pop is faster than "leave". */
5829 else if (TARGET_USE_LEAVE || optimize_size
5830 || !cfun->machine->use_fast_prologue_epilogue)
5831 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5834 pro_epilogue_adjust_stack (stack_pointer_rtx,
5835 hard_frame_pointer_rtx,
5838 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5840 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5845 /* The first step is to deallocate the stack frame so that we can
5846 pop the registers. */
5849 gcc_assert (frame_pointer_needed);
5850 pro_epilogue_adjust_stack (stack_pointer_rtx,
5851 hard_frame_pointer_rtx,
5852 GEN_INT (offset), style);
5854 else if (frame.to_allocate)
5855 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5856 GEN_INT (frame.to_allocate), style);
5858 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5859 if (ix86_save_reg (regno, false))
5862 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5864 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5866 if (frame_pointer_needed)
5868 /* Leave results in shorter dependency chains on CPUs that are
5869 able to grok it fast. */
5870 if (TARGET_USE_LEAVE)
5871 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5872 else if (TARGET_64BIT)
5873 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5875 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5879 if (cfun->machine->force_align_arg_pointer)
5881 emit_insn (gen_addsi3 (stack_pointer_rtx,
5882 cfun->machine->force_align_arg_pointer,
5886 /* Sibcall epilogues don't want a return instruction. */
5890 if (current_function_pops_args && current_function_args_size)
5892 rtx popc = GEN_INT (current_function_pops_args);
5894 /* i386 can only pop 64K bytes. If asked to pop more, pop the
5895 return address, do an explicit add, and jump indirectly to the caller. */
5898 if (current_function_pops_args >= 65536)
5900 rtx ecx = gen_rtx_REG (SImode, 2);
5902 /* There is no "pascal" calling convention in the 64bit ABI. */
5903 gcc_assert (!TARGET_64BIT);
5905 emit_insn (gen_popsi1 (ecx));
5906 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5907 emit_jump_insn (gen_return_indirect_internal (ecx));
5910 emit_jump_insn (gen_return_pop_internal (popc));
5913 emit_jump_insn (gen_return_internal ());
5916 /* Reset from the function's potential modifications. */
5919 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5920 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5922 if (pic_offset_table_rtx)
5923 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5925 /* Mach-O doesn't support labels at the end of objects, so if
5926 it looks like we might want one, insert a NOP. */
5928 rtx insn = get_last_insn ();
5931 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
5932 insn = PREV_INSN (insn);
5936 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
5937 fputs ("\tnop\n", file);
5943 /* Extract the parts of an RTL expression that is a valid memory address
5944 for an instruction. Return 0 if the structure of the address is
5945 grossly off. Return -1 if the address contains ASHIFT, so it is not
5946 strictly valid, but is still used for computing the length of the lea instruction. */
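/* As an illustrative example, the AT&T address 12(%ebx,%esi,4), i.e.
   (plus (plus (mult (reg %esi) (const_int 4)) (reg %ebx))
         (const_int 12)),
   decomposes into base = %ebx, index = %esi, scale = 4, disp = 12
   and seg = SEG_DEFAULT.  */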
5949 ix86_decompose_address (rtx addr, struct ix86_address *out)
5951 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5952 rtx base_reg, index_reg;
5953 HOST_WIDE_INT scale = 1;
5954 rtx scale_rtx = NULL_RTX;
5956 enum ix86_address_seg seg = SEG_DEFAULT;
5958 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
5960 else if (GET_CODE (addr) == PLUS)
5970 addends[n++] = XEXP (op, 1);
5973 while (GET_CODE (op) == PLUS);
5978 for (i = n; i >= 0; --i)
5981 switch (GET_CODE (op))
5986 index = XEXP (op, 0);
5987 scale_rtx = XEXP (op, 1);
5991 if (XINT (op, 1) == UNSPEC_TP
5992 && TARGET_TLS_DIRECT_SEG_REFS
5993 && seg == SEG_DEFAULT)
5994 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
6023 else if (GET_CODE (addr) == MULT)
6025 index = XEXP (addr, 0); /* index*scale */
6026 scale_rtx = XEXP (addr, 1);
6028 else if (GET_CODE (addr) == ASHIFT)
6032 /* We're called for lea too, which implements ashift on occasion. */
6033 index = XEXP (addr, 0);
6034 tmp = XEXP (addr, 1);
6035 if (GET_CODE (tmp) != CONST_INT)
6037 scale = INTVAL (tmp);
6038 if ((unsigned HOST_WIDE_INT) scale > 3)
6044 disp = addr; /* displacement */
6046 /* Extract the integral value of scale. */
6049 if (GET_CODE (scale_rtx) != CONST_INT)
6051 scale = INTVAL (scale_rtx);
6054 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
6055 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
6057 /* Allow arg pointer and stack pointer as index if there is no scaling. */
6058 if (base_reg && index_reg && scale == 1
6059 && (index_reg == arg_pointer_rtx
6060 || index_reg == frame_pointer_rtx
6061 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
6064 tmp = base, base = index, index = tmp;
6065 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
6068 /* Special case: %ebp cannot be encoded as a base without a displacement. */
6069 if ((base_reg == hard_frame_pointer_rtx
6070 || base_reg == frame_pointer_rtx
6071 || base_reg == arg_pointer_rtx) && !disp)
6074 /* Special case: on K6, [%esi] causes the instruction to be vector decoded.
6075 Avoid this by transforming to [%esi+0]. */
6076 if (ix86_tune == PROCESSOR_K6 && !optimize_size
6077 && base_reg && !index_reg && !disp
6079 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
6082 /* Special case: encode reg+reg instead of reg*2. */
6083 if (!base && index && scale && scale == 2)
6084 base = index, base_reg = index_reg, scale = 1;
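/* e.g. (,%eax,2) requires a 32-bit displacement field in the SIB
   encoding, while the equivalent (%eax,%eax) does not.  */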
6086 /* Special case: scaling cannot be encoded without base or displacement. */
6087 if (!base && !disp && index && scale != 1)
6099 /* Return the cost of the memory address x.
6100 For i386, it is better to use a complex address than let gcc copy
6101 the address into a reg and make a new pseudo. But not if the address
6102 requires two regs - that would mean more pseudos with longer lifetimes. */
6105 ix86_address_cost (rtx x)
6107 struct ix86_address parts;
6109 int ok = ix86_decompose_address (x, &parts);
6113 if (parts.base && GET_CODE (parts.base) == SUBREG)
6114 parts.base = SUBREG_REG (parts.base);
6115 if (parts.index && GET_CODE (parts.index) == SUBREG)
6116 parts.index = SUBREG_REG (parts.index);
6118 /* More complex memory references are better. */
6119 if (parts.disp && parts.disp != const0_rtx)
6121 if (parts.seg != SEG_DEFAULT)
6124 /* Attempt to minimize number of registers in the address. */
6126 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
6128 && (!REG_P (parts.index)
6129 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
6133 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
6135 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
6136 && parts.base != parts.index)
6139 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
6140 since its predecode logic can't detect the length of instructions
6141 and it degenerates to vector decoding. Increase the cost of such
6142 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
6143 to split such addresses or even refuse such addresses at all.
6145 The following addressing modes are affected:
6146 [base+scale*index]
6147 [scale*index+disp]
6148 [base+index]
6150 The first and last case may be avoidable by explicitly coding the zero in
6151 the memory address, but I don't have an AMD-K6 machine handy to check this theory. */
6155 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
6156 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
6157 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
6163 /* If X is a machine specific address (i.e. a symbol or label being
6164 referenced as a displacement from the GOT implemented using an
6165 UNSPEC), then return the base term. Otherwise return X. */
6168 ix86_find_base_term (rtx x)
6174 if (GET_CODE (x) != CONST)
6177 if (GET_CODE (term) == PLUS
6178 && (GET_CODE (XEXP (term, 1)) == CONST_INT
6179 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
6180 term = XEXP (term, 0);
6181 if (GET_CODE (term) != UNSPEC
6182 || XINT (term, 1) != UNSPEC_GOTPCREL)
6185 term = XVECEXP (term, 0, 0);
6187 if (GET_CODE (term) != SYMBOL_REF
6188 && GET_CODE (term) != LABEL_REF)
6194 term = ix86_delegitimize_address (x);
6196 if (GET_CODE (term) != SYMBOL_REF
6197 && GET_CODE (term) != LABEL_REF)
6203 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
6204 this is used to form addresses to local data when -fPIC is in effect. */
6208 darwin_local_data_pic (rtx disp)
6210 if (GET_CODE (disp) == MINUS)
6212 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
6213 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
6214 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
6216 const char *sym_name = XSTR (XEXP (disp, 1), 0);
6217 if (! strcmp (sym_name, "<pic base>"))
6225 /* Determine if a given RTX is a valid constant. We already know this
6226 satisfies CONSTANT_P. */
6229 legitimate_constant_p (rtx x)
6231 switch (GET_CODE (x))
6236 if (GET_CODE (x) == PLUS)
6238 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6243 if (TARGET_MACHO && darwin_local_data_pic (x))
6246 /* Only some unspecs are valid as "constants". */
6247 if (GET_CODE (x) == UNSPEC)
6248 switch (XINT (x, 1))
6251 return TARGET_64BIT;
6254 x = XVECEXP (x, 0, 0);
6255 return (GET_CODE (x) == SYMBOL_REF
6256 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6258 x = XVECEXP (x, 0, 0);
6259 return (GET_CODE (x) == SYMBOL_REF
6260 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
6265 /* We must have drilled down to a symbol. */
6266 if (GET_CODE (x) == LABEL_REF)
6268 if (GET_CODE (x) != SYMBOL_REF)
6273 /* TLS symbols are never valid. */
6274 if (SYMBOL_REF_TLS_MODEL (x))
6279 if (GET_MODE (x) == TImode
6280 && x != CONST0_RTX (TImode)
6286 if (x == CONST0_RTX (GET_MODE (x)))
6294 /* Otherwise we handle everything else in the move patterns. */
6298 /* Determine if it's legal to put X into the constant pool. This
6299 is not possible for the address of thread-local symbols, which
6300 is checked above. */
6303 ix86_cannot_force_const_mem (rtx x)
6305 /* We can always put integral constants and vectors in memory. */
6306 switch (GET_CODE (x))
6316 return !legitimate_constant_p (x);
6319 /* Determine if a given RTX is a valid constant address. */
6322 constant_address_p (rtx x)
6324 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
6327 /* Nonzero if the constant value X is a legitimate general operand
6328 when generating PIC code. It is given that flag_pic is on and
6329 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
6332 legitimate_pic_operand_p (rtx x)
6336 switch (GET_CODE (x))
6339 inner = XEXP (x, 0);
6340 if (GET_CODE (inner) == PLUS
6341 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
6342 inner = XEXP (inner, 0);
6344 /* Only some unspecs are valid as "constants". */
6345 if (GET_CODE (inner) == UNSPEC)
6346 switch (XINT (inner, 1))
6349 return TARGET_64BIT;
6351 x = XVECEXP (inner, 0, 0);
6352 return (GET_CODE (x) == SYMBOL_REF
6353 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6361 return legitimate_pic_address_disp_p (x);
6368 /* Determine if a given CONST RTX is a valid memory displacement in PIC mode. */
6372 legitimate_pic_address_disp_p (rtx disp)
6376 /* In 64bit mode we can allow direct addresses of symbols and labels
6377 when they are not dynamic symbols. */
6380 rtx op0 = disp, op1;
6382 switch (GET_CODE (disp))
6388 if (GET_CODE (XEXP (disp, 0)) != PLUS)
6390 op0 = XEXP (XEXP (disp, 0), 0);
6391 op1 = XEXP (XEXP (disp, 0), 1);
6392 if (GET_CODE (op1) != CONST_INT
6393 || INTVAL (op1) >= 16*1024*1024
6394 || INTVAL (op1) < -16*1024*1024)
6396 if (GET_CODE (op0) == LABEL_REF)
6398 if (GET_CODE (op0) != SYMBOL_REF)
6403 /* TLS references should always be enclosed in UNSPEC. */
6404 if (SYMBOL_REF_TLS_MODEL (op0))
6406 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
6414 if (GET_CODE (disp) != CONST)
6416 disp = XEXP (disp, 0);
6420 /* It is unsafe to allow PLUS expressions; this limits the allowed
6421 distance of GOT tables. We should not need these anyway. */
6422 if (GET_CODE (disp) != UNSPEC
6423 || (XINT (disp, 1) != UNSPEC_GOTPCREL
6424 && XINT (disp, 1) != UNSPEC_GOTOFF))
6427 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
6428 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
6434 if (GET_CODE (disp) == PLUS)
6436 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
6438 disp = XEXP (disp, 0);
6442 if (TARGET_MACHO && darwin_local_data_pic (disp))
6445 if (GET_CODE (disp) != UNSPEC)
6448 switch (XINT (disp, 1))
6453 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
6455 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
6456 While the ABI also specifies a 32bit relocation, we don't produce
6457 it in the small PIC model at all. */
6458 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
6459 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
6461 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
6463 case UNSPEC_GOTTPOFF:
6464 case UNSPEC_GOTNTPOFF:
6465 case UNSPEC_INDNTPOFF:
6468 disp = XVECEXP (disp, 0, 0);
6469 return (GET_CODE (disp) == SYMBOL_REF
6470 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
6472 disp = XVECEXP (disp, 0, 0);
6473 return (GET_CODE (disp) == SYMBOL_REF
6474 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
6476 disp = XVECEXP (disp, 0, 0);
6477 return (GET_CODE (disp) == SYMBOL_REF
6478 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
6484 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
6485 memory address for an instruction. The MODE argument is the machine mode
6486 for the MEM expression that wants to use this address.
6488 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
6489 convert common non-canonical forms to canonical form so that they will be recognized. */
6493 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
6495 struct ix86_address parts;
6496 rtx base, index, disp;
6497 HOST_WIDE_INT scale;
6498 const char *reason = NULL;
6499 rtx reason_rtx = NULL_RTX;
6501 if (TARGET_DEBUG_ADDR)
6504 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
6505 GET_MODE_NAME (mode), strict);
6509 if (ix86_decompose_address (addr, &parts) <= 0)
6511 reason = "decomposition failed";
6516 index = parts.index;
6518 scale = parts.scale;
6520 /* Validate base register.
6522 Don't allow SUBREG's that span more than a word here. It can lead to spill
6523 failures when the base is one word out of a two word structure, which is
6524 represented internally as a DImode int. */
6533 else if (GET_CODE (base) == SUBREG
6534 && REG_P (SUBREG_REG (base))
6535 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
6537 reg = SUBREG_REG (base);
6540 reason = "base is not a register";
6544 if (GET_MODE (base) != Pmode)
6546 reason = "base is not in Pmode";
6550 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
6551 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
6553 reason = "base is not valid";
6558 /* Validate index register.
6560 Don't allow SUBREG's that span more than a word here -- same as above. */
6569 else if (GET_CODE (index) == SUBREG
6570 && REG_P (SUBREG_REG (index))
6571 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
6573 reg = SUBREG_REG (index);
6576 reason = "index is not a register";
6580 if (GET_MODE (index) != Pmode)
6582 reason = "index is not in Pmode";
6586 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
6587 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
6589 reason = "index is not valid";
6594 /* Validate scale factor. */
6597 reason_rtx = GEN_INT (scale);
6600 reason = "scale without index";
6604 if (scale != 2 && scale != 4 && scale != 8)
6606 reason = "scale is not a valid multiplier";
6611 /* Validate displacement. */
6616 if (GET_CODE (disp) == CONST
6617 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
6618 switch (XINT (XEXP (disp, 0), 1))
6620 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
6621 used. While the ABI also specifies 32bit relocations, we don't produce
6622 them at all and use IP-relative addressing instead. */
6625 gcc_assert (flag_pic);
6627 goto is_legitimate_pic;
6628 reason = "64bit address unspec";
6631 case UNSPEC_GOTPCREL:
6632 gcc_assert (flag_pic);
6633 goto is_legitimate_pic;
6635 case UNSPEC_GOTTPOFF:
6636 case UNSPEC_GOTNTPOFF:
6637 case UNSPEC_INDNTPOFF:
6643 reason = "invalid address unspec";
6647 else if (SYMBOLIC_CONST (disp)
6651 && MACHOPIC_INDIRECT
6652 && !machopic_operand_p (disp)
6658 if (TARGET_64BIT && (index || base))
6660 /* foo@dtpoff(%rX) is ok. */
6661 if (GET_CODE (disp) != CONST
6662 || GET_CODE (XEXP (disp, 0)) != PLUS
6663 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
6664 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
6665 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
6666 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
6668 reason = "non-constant pic memory reference";
6672 else if (! legitimate_pic_address_disp_p (disp))
6674 reason = "displacement is an invalid pic construct";
6678 /* This code used to verify that a symbolic pic displacement
6679 includes the pic_offset_table_rtx register.
6681 While this is a good idea, unfortunately these constructs may
6682 be created by the "adds using lea" optimization for incorrect
6683 code like:
6685 int a;
6686 int foo(int i)
6687 {
6688 return *(&a+i);
6689 }
6691 This code is nonsensical, but results in addressing the
6692 GOT table with a pic_offset_table_rtx base. We can't
6693 just refuse it easily, since it gets matched by the
6694 "addsi3" pattern, which later gets split to lea in the
6695 case the output register differs from the input. While this
6696 could be handled by a separate addsi pattern for this case
6697 that never results in lea, disabling this test seems to be
6698 the easier and correct fix for the crash. */
6700 else if (GET_CODE (disp) != LABEL_REF
6701 && GET_CODE (disp) != CONST_INT
6702 && (GET_CODE (disp) != CONST
6703 || !legitimate_constant_p (disp))
6704 && (GET_CODE (disp) != SYMBOL_REF
6705 || !legitimate_constant_p (disp)))
6707 reason = "displacement is not constant";
6710 else if (TARGET_64BIT
6711 && !x86_64_immediate_operand (disp, VOIDmode))
6713 reason = "displacement is out of range";
6718 /* Everything looks valid. */
6719 if (TARGET_DEBUG_ADDR)
6720 fprintf (stderr, "Success.\n");
6724 if (TARGET_DEBUG_ADDR)
6726 fprintf (stderr, "Error: %s\n", reason);
6727 debug_rtx (reason_rtx);
6732 /* Return a unique alias set for the GOT. */
6734 static HOST_WIDE_INT
6735 ix86_GOT_alias_set (void)
6737 static HOST_WIDE_INT set = -1;
6739 set = new_alias_set ();
6743 /* Return a legitimate reference for ORIG (an address) using the
6744 register REG. If REG is 0, a new pseudo is generated.
6746 There are two types of references that must be handled:
6748 1. Global data references must load the address from the GOT, via
6749 the PIC reg. An insn is emitted to do this load, and the reg is
6752 2. Static data references, constant pool addresses, and code labels
6753 compute the address as an offset from the GOT, whose base is in
6754 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6755 differentiate them from global data objects. The returned
6756 address is the PIC reg + an unspec constant.
6758 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6759 reg also appears in the address. */
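/* Illustratively, for -m32 PIC: a global symbol foo yields
   (mem (plus pic_reg (const (unspec [foo] UNSPEC_GOT)))), a load of
   foo's address from the GOT, while a file-local symbol bar yields
   (plus pic_reg (const (unspec [bar] UNSPEC_GOTOFF))), a plain
   address computation relative to the PIC base.  */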
6762 legitimize_pic_address (rtx orig, rtx reg)
6769 if (TARGET_MACHO && !TARGET_64BIT)
6772 reg = gen_reg_rtx (Pmode);
6773 /* Use the generic Mach-O PIC machinery. */
6774 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6778 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6780 else if (TARGET_64BIT
6781 && ix86_cmodel != CM_SMALL_PIC
6782 && local_symbolic_operand (addr, Pmode))
6785 /* This symbol may be referenced via a displacement from the PIC
6786 base address (@GOTOFF). */
6788 if (reload_in_progress)
6789 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6790 if (GET_CODE (addr) == CONST)
6791 addr = XEXP (addr, 0);
6792 if (GET_CODE (addr) == PLUS)
6794 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6795 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6798 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6799 new = gen_rtx_CONST (Pmode, new);
6801 tmpreg = gen_reg_rtx (Pmode);
6804 emit_move_insn (tmpreg, new);
6808 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
6809 tmpreg, 1, OPTAB_DIRECT);
6812 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
6814 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6816 /* This symbol may be referenced via a displacement from the PIC
6817 base address (@GOTOFF). */
6819 if (reload_in_progress)
6820 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6821 if (GET_CODE (addr) == CONST)
6822 addr = XEXP (addr, 0);
6823 if (GET_CODE (addr) == PLUS)
6825 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6826 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6829 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6830 new = gen_rtx_CONST (Pmode, new);
6831 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6835 emit_move_insn (reg, new);
6839 else if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
6843 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6844 new = gen_rtx_CONST (Pmode, new);
6845 new = gen_const_mem (Pmode, new);
6846 set_mem_alias_set (new, ix86_GOT_alias_set ());
6849 reg = gen_reg_rtx (Pmode);
6850 /* Use gen_movsi directly; otherwise the address is loaded
6851 into a register for CSE. We don't want to CSE these addresses;
6852 instead we CSE addresses from the GOT table, so skip this. */
6853 emit_insn (gen_movsi (reg, new));
6858 /* This symbol must be referenced via a load from the
6859 Global Offset Table (@GOT). */
6861 if (reload_in_progress)
6862 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6863 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6864 new = gen_rtx_CONST (Pmode, new);
6865 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6866 new = gen_const_mem (Pmode, new);
6867 set_mem_alias_set (new, ix86_GOT_alias_set ());
6870 reg = gen_reg_rtx (Pmode);
6871 emit_move_insn (reg, new);
6877 if (GET_CODE (addr) == CONST_INT
6878 && !x86_64_immediate_operand (addr, VOIDmode))
6882 emit_move_insn (reg, addr);
6886 new = force_reg (Pmode, addr);
6888 else if (GET_CODE (addr) == CONST)
6890 addr = XEXP (addr, 0);
6892 /* We must match stuff we generate before. Assume the only
6893 unspecs that can get here are ours. Not that we could do
6894 anything with them anyway.... */
6895 if (GET_CODE (addr) == UNSPEC
6896 || (GET_CODE (addr) == PLUS
6897 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
6899 gcc_assert (GET_CODE (addr) == PLUS);
6901 if (GET_CODE (addr) == PLUS)
6903 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6905 /* Check first to see if this is a constant offset from a @GOTOFF
6906 symbol reference. */
6907 if (local_symbolic_operand (op0, Pmode)
6908 && GET_CODE (op1) == CONST_INT)
6912 if (reload_in_progress)
6913 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6914 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6916 new = gen_rtx_PLUS (Pmode, new, op1);
6917 new = gen_rtx_CONST (Pmode, new);
6918 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6922 emit_move_insn (reg, new);
6928 if (INTVAL (op1) < -16*1024*1024
6929 || INTVAL (op1) >= 16*1024*1024)
6931 if (!x86_64_immediate_operand (op1, Pmode))
6932 op1 = force_reg (Pmode, op1);
6933 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
6939 base = legitimize_pic_address (XEXP (addr, 0), reg);
6940 new = legitimize_pic_address (XEXP (addr, 1),
6941 base == reg ? NULL_RTX : reg);
6943 if (GET_CODE (new) == CONST_INT)
6944 new = plus_constant (base, INTVAL (new));
6947 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6949 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6950 new = XEXP (new, 1);
6952 new = gen_rtx_PLUS (Pmode, base, new);
6960 /* Load the thread pointer. If TO_REG is true, force it into a register. */
6963 get_thread_pointer (int to_reg)
6967 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6971 reg = gen_reg_rtx (Pmode);
6972 insn = gen_rtx_SET (VOIDmode, reg, tp);
6973 insn = emit_insn (insn);
6978 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6979 false if we expect this to be used for a memory address and true if
6980 we expect to load the address into a register. */
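/* A rough map of the models handled below: global-dynamic and
   local-dynamic obtain addresses via __tls_get_addr (or the GNU2
   TLS descriptor scheme when TARGET_GNU2_TLS), initial-exec loads
   the offset of the variable from the GOT (@gottpoff/@indntpoff)
   and combines it with the thread pointer, and local-exec uses a
   link-time constant offset (@tpoff/@ntpoff) directly.  */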
6983 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6985 rtx dest, base, off, pic, tp;
6990 case TLS_MODEL_GLOBAL_DYNAMIC:
6991 dest = gen_reg_rtx (Pmode);
6992 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
6994 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
6996 rtx rax = gen_rtx_REG (Pmode, 0), insns;
6999 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
7000 insns = get_insns ();
7003 emit_libcall_block (insns, dest, rax, x);
7005 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7006 emit_insn (gen_tls_global_dynamic_64 (dest, x));
7008 emit_insn (gen_tls_global_dynamic_32 (dest, x));
7010 if (TARGET_GNU2_TLS)
7012 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
7014 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7018 case TLS_MODEL_LOCAL_DYNAMIC:
7019 base = gen_reg_rtx (Pmode);
7020 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7022 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7024 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
7027 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
7028 insns = get_insns ();
7031 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
7032 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
7033 emit_libcall_block (insns, base, rax, note);
7035 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7036 emit_insn (gen_tls_local_dynamic_base_64 (base));
7038 emit_insn (gen_tls_local_dynamic_base_32 (base));
7040 if (TARGET_GNU2_TLS)
7042 rtx x = ix86_tls_module_base ();
7044 set_unique_reg_note (get_last_insn (), REG_EQUIV,
7045 gen_rtx_MINUS (Pmode, x, tp));
7048 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
7049 off = gen_rtx_CONST (Pmode, off);
7051 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
7053 if (TARGET_GNU2_TLS)
7055 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
7057 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7062 case TLS_MODEL_INITIAL_EXEC:
7066 type = UNSPEC_GOTNTPOFF;
7070 if (reload_in_progress)
7071 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7072 pic = pic_offset_table_rtx;
7073 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
7075 else if (!TARGET_ANY_GNU_TLS)
7077 pic = gen_reg_rtx (Pmode);
7078 emit_insn (gen_set_got (pic));
7079 type = UNSPEC_GOTTPOFF;
7084 type = UNSPEC_INDNTPOFF;
7087 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
7088 off = gen_rtx_CONST (Pmode, off);
7090 off = gen_rtx_PLUS (Pmode, pic, off);
7091 off = gen_const_mem (Pmode, off);
7092 set_mem_alias_set (off, ix86_GOT_alias_set ());
7094 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7096 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7097 off = force_reg (Pmode, off);
7098 return gen_rtx_PLUS (Pmode, base, off);
7102 base = get_thread_pointer (true);
7103 dest = gen_reg_rtx (Pmode);
7104 emit_insn (gen_subsi3 (dest, base, off));
7108 case TLS_MODEL_LOCAL_EXEC:
7109 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
7110 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7111 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
7112 off = gen_rtx_CONST (Pmode, off);
7114 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7116 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7117 return gen_rtx_PLUS (Pmode, base, off);
7121 base = get_thread_pointer (true);
7122 dest = gen_reg_rtx (Pmode);
7123 emit_insn (gen_subsi3 (dest, base, off));
7134 /* Try machine-dependent ways of modifying an illegitimate address
7135 to be legitimate. If we find one, return the new, valid address.
7136 This macro is used in only one place: `memory_address' in explow.c.
7138 OLDX is the address as it was before break_out_memory_refs was called.
7139 In some cases it is useful to look at this to decide what needs to be done.
7141 MODE and WIN are passed so that this macro can use
7142 GO_IF_LEGITIMATE_ADDRESS.
7144 It is always safe for this macro to do nothing. It exists to recognize
7145 opportunities to optimize the output.
7147 For the 80386, we handle X+REG by loading X into a register R and
7148 using R+REG. R will go in a general reg and indexing will be used.
7149 However, if REG is a broken-out memory address or multiplication,
7150 nothing needs to be done because REG can certainly go in a general reg.
7152 When -fpic is used, special handling is needed for symbolic references.
7153 See comments by legitimize_pic_address in i386.c for details. */
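/* For example, an address such as (plus (ashift (reg) (const_int 2))
   (reg)) is canonicalized below into (plus (mult (reg) (const_int 4))
   (reg)), which GO_IF_LEGITIMATE_ADDRESS then recognizes.  */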
7156 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
7161 if (TARGET_DEBUG_ADDR)
7163 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
7164 GET_MODE_NAME (mode));
7168 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
7170 return legitimize_tls_address (x, log, false);
7171 if (GET_CODE (x) == CONST
7172 && GET_CODE (XEXP (x, 0)) == PLUS
7173 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7174 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
7176 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
7177 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
7180 if (flag_pic && SYMBOLIC_CONST (x))
7181 return legitimize_pic_address (x, 0);
7183 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
7184 if (GET_CODE (x) == ASHIFT
7185 && GET_CODE (XEXP (x, 1)) == CONST_INT
7186 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
7189 log = INTVAL (XEXP (x, 1));
7190 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
7191 GEN_INT (1 << log));
7194 if (GET_CODE (x) == PLUS)
7196 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
7198 if (GET_CODE (XEXP (x, 0)) == ASHIFT
7199 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7200 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
7203 log = INTVAL (XEXP (XEXP (x, 0), 1));
7204 XEXP (x, 0) = gen_rtx_MULT (Pmode,
7205 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
7206 GEN_INT (1 << log));
7209 if (GET_CODE (XEXP (x, 1)) == ASHIFT
7210 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
7211 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
7214 log = INTVAL (XEXP (XEXP (x, 1), 1));
7215 XEXP (x, 1) = gen_rtx_MULT (Pmode,
7216 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
7217 GEN_INT (1 << log));
7220 /* Put multiply first if it isn't already. */
7221 if (GET_CODE (XEXP (x, 1)) == MULT)
7223 rtx tmp = XEXP (x, 0);
7224 XEXP (x, 0) = XEXP (x, 1);
7229 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
7230 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
7231 created by virtual register instantiation, register elimination, and
7232 similar optimizations. */
7233 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
7236 x = gen_rtx_PLUS (Pmode,
7237 gen_rtx_PLUS (Pmode, XEXP (x, 0),
7238 XEXP (XEXP (x, 1), 0)),
7239 XEXP (XEXP (x, 1), 1));
7243 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
7244 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
7245 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
7246 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
7247 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
7248 && CONSTANT_P (XEXP (x, 1)))
7251 rtx other = NULL_RTX;
7253 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7255 constant = XEXP (x, 1);
7256 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
7258 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
7260 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
7261 other = XEXP (x, 1);
7269 x = gen_rtx_PLUS (Pmode,
7270 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
7271 XEXP (XEXP (XEXP (x, 0), 1), 0)),
7272 plus_constant (other, INTVAL (constant)));
7276 if (changed && legitimate_address_p (mode, x, FALSE))
7279 if (GET_CODE (XEXP (x, 0)) == MULT)
7282 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
7285 if (GET_CODE (XEXP (x, 1)) == MULT)
7288 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
7292 && GET_CODE (XEXP (x, 1)) == REG
7293 && GET_CODE (XEXP (x, 0)) == REG)
7296 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
7299 x = legitimize_pic_address (x, 0);
7302 if (changed && legitimate_address_p (mode, x, FALSE))
7305 if (GET_CODE (XEXP (x, 0)) == REG)
7307 rtx temp = gen_reg_rtx (Pmode);
7308 rtx val = force_operand (XEXP (x, 1), temp);
7310 emit_move_insn (temp, val);
7316 else if (GET_CODE (XEXP (x, 1)) == REG)
7318 rtx temp = gen_reg_rtx (Pmode);
7319 rtx val = force_operand (XEXP (x, 0), temp);
7321 emit_move_insn (temp, val);
7331 /* Print an integer constant expression in assembler syntax. Addition
7332 and subtraction are the only arithmetic that may appear in these
7333 expressions. FILE is the stdio stream to write to, X is the rtx, and
7334 CODE is the operand print code from the output string. */
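/* So, for instance, (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF))
   comes out as "foo@GOTOFF", and a 64bit GOTPCREL reference comes out
   as "foo@GOTPCREL(%rip)".  */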
7337 output_pic_addr_const (FILE *file, rtx x, int code)
7341 switch (GET_CODE (x))
7344 gcc_assert (flag_pic);
7349 output_addr_const (file, x);
7350 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
7351 fputs ("@PLT", file);
7358 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
7359 assemble_name (asm_out_file, buf);
7363 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7367 /* This used to output parentheses around the expression,
7368 but that does not work on the 386 (either ATT or BSD assembler). */
7369 output_pic_addr_const (file, XEXP (x, 0), code);
7373 if (GET_MODE (x) == VOIDmode)
7375 /* We can use %d if the number is <32 bits and positive. */
7376 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
7377 fprintf (file, "0x%lx%08lx",
7378 (unsigned long) CONST_DOUBLE_HIGH (x),
7379 (unsigned long) CONST_DOUBLE_LOW (x));
7381 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
7384 /* We can't handle floating point constants;
7385 PRINT_OPERAND must handle them. */
7386 output_operand_lossage ("floating constant misused");
7390 /* Some assemblers need integer constants to appear first. */
7391 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
7393 output_pic_addr_const (file, XEXP (x, 0), code);
7395 output_pic_addr_const (file, XEXP (x, 1), code);
7399 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
7400 output_pic_addr_const (file, XEXP (x, 1), code);
7402 output_pic_addr_const (file, XEXP (x, 0), code);
7408 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
7409 output_pic_addr_const (file, XEXP (x, 0), code);
7411 output_pic_addr_const (file, XEXP (x, 1), code);
7413 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
7417 gcc_assert (XVECLEN (x, 0) == 1);
7418 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
7419 switch (XINT (x, 1))
7422 fputs ("@GOT", file);
7425 fputs ("@GOTOFF", file);
7427 case UNSPEC_GOTPCREL:
7428 fputs ("@GOTPCREL(%rip)", file);
7430 case UNSPEC_GOTTPOFF:
7431 /* FIXME: This might be @TPOFF in Sun ld too. */
7432 fputs ("@GOTTPOFF", file);
7435 fputs ("@TPOFF", file);
7439 fputs ("@TPOFF", file);
7441 fputs ("@NTPOFF", file);
7444 fputs ("@DTPOFF", file);
7446 case UNSPEC_GOTNTPOFF:
7448 fputs ("@GOTTPOFF(%rip)", file);
7450 fputs ("@GOTNTPOFF", file);
7452 case UNSPEC_INDNTPOFF:
7453 fputs ("@INDNTPOFF", file);
7456 output_operand_lossage ("invalid UNSPEC as operand");
7462 output_operand_lossage ("invalid expression as operand");
7466 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7467 We need to emit DTP-relative relocations. */
7470 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
7472 fputs (ASM_LONG, file);
7473 output_addr_const (file, x);
7474 fputs ("@DTPOFF", file);
7480 fputs (", 0", file);
7487 /* In the name of slightly smaller debug output, and to cater to
7488 general assembler lossage, recognize PIC+GOTOFF and turn it back
7489 into a direct symbol reference.
7491 On Darwin, this is necessary to avoid a crash, because Darwin
7492 has a different PIC label for each routine but the DWARF debugging
7493 information is not associated with any particular routine, so it's
7494 necessary to remove references to the PIC label from RTL stored by
7495 the DWARF output code. */
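/* For example, (plus pic_reg (const (unspec [(symbol_ref "foo")]
   UNSPEC_GOTOFF))) is turned back into (symbol_ref "foo"), with any
   register or constant addend re-applied around the result.  */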
7498 ix86_delegitimize_address (rtx orig_x)
7501 /* reg_addend is NULL or a multiple of some register. */
7502 rtx reg_addend = NULL_RTX;
7503 /* const_addend is NULL or a const_int. */
7504 rtx const_addend = NULL_RTX;
7505 /* This is the result, or NULL. */
7506 rtx result = NULL_RTX;
7508 if (GET_CODE (x) == MEM)
7513 if (GET_CODE (x) != CONST
7514 || GET_CODE (XEXP (x, 0)) != UNSPEC
7515 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
7516 || GET_CODE (orig_x) != MEM)
7518 return XVECEXP (XEXP (x, 0), 0, 0);
7521 if (GET_CODE (x) != PLUS
7522 || GET_CODE (XEXP (x, 1)) != CONST)
7525 if (GET_CODE (XEXP (x, 0)) == REG
7526 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7527 /* %ebx + GOT/GOTOFF */
7529 else if (GET_CODE (XEXP (x, 0)) == PLUS)
7531 /* %ebx + %reg * scale + GOT/GOTOFF */
7532 reg_addend = XEXP (x, 0);
7533 if (GET_CODE (XEXP (reg_addend, 0)) == REG
7534 && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
7535 reg_addend = XEXP (reg_addend, 1);
7536 else if (GET_CODE (XEXP (reg_addend, 1)) == REG
7537 && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
7538 reg_addend = XEXP (reg_addend, 0);
7541 if (GET_CODE (reg_addend) != REG
7542 && GET_CODE (reg_addend) != MULT
7543 && GET_CODE (reg_addend) != ASHIFT)
7549 x = XEXP (XEXP (x, 1), 0);
7550 if (GET_CODE (x) == PLUS
7551 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7553 const_addend = XEXP (x, 1);
7557 if (GET_CODE (x) == UNSPEC
7558 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
7559 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
7560 result = XVECEXP (x, 0, 0);
7562 if (TARGET_MACHO && darwin_local_data_pic (x)
7563 && GET_CODE (orig_x) != MEM)
7564 result = XEXP (x, 0);
7570 result = gen_rtx_PLUS (Pmode, result, const_addend);
7572 result = gen_rtx_PLUS (Pmode, reg_addend, result);
7577 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
7582 if (mode == CCFPmode || mode == CCFPUmode)
7584 enum rtx_code second_code, bypass_code;
7585 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
7586 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
7587 code = ix86_fp_compare_code_to_integer (code);
7591 code = reverse_condition (code);
7602 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
7606 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
7607 Those same assemblers have the same but opposite lossage on cmov. */
7608 gcc_assert (mode == CCmode);
7609 suffix = fp ? "nbe" : "a";
7629 gcc_assert (mode == CCmode);
7651 gcc_assert (mode == CCmode);
7652 suffix = fp ? "nb" : "ae";
7655 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
7659 gcc_assert (mode == CCmode);
7663 suffix = fp ? "u" : "p";
7666 suffix = fp ? "nu" : "np";
7671 fputs (suffix, file);
7674 /* Print the name of register X to FILE based on its machine mode and number.
7675 If CODE is 'w', pretend the mode is HImode.
7676 If CODE is 'b', pretend the mode is QImode.
7677 If CODE is 'k', pretend the mode is SImode.
7678 If CODE is 'q', pretend the mode is DImode.
7679 If CODE is 'h', pretend the reg is the 'high' byte register.
7680 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
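/* Illustratively, in AT&T syntax: %k applied to the ax register
   prints "%eax", %w prints "%ax", %b prints "%al", and %h prints
   "%ah".  */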
7683 print_reg (rtx x, int code, FILE *file)
7685 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
7686 && REGNO (x) != FRAME_POINTER_REGNUM
7687 && REGNO (x) != FLAGS_REG
7688 && REGNO (x) != FPSR_REG
7689 && REGNO (x) != FPCR_REG);
7691 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
7694 if (code == 'w' || MMX_REG_P (x))
7696 else if (code == 'b')
7698 else if (code == 'k')
7700 else if (code == 'q')
7702 else if (code == 'y')
7704 else if (code == 'h')
7707 code = GET_MODE_SIZE (GET_MODE (x));
7709 /* Irritatingly, AMD extended registers use a different naming convention
7710 from the normal registers. */
7711 if (REX_INT_REG_P (x))
7713 gcc_assert (TARGET_64BIT);
7717 error ("extended registers have no high halves");
7720 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
7723 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
7726 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
7729 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
7732 error ("unsupported operand size for extended register");
7740 if (STACK_TOP_P (x))
7742 fputs ("st(0)", file);
7749 if (! ANY_FP_REG_P (x))
7750 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
7755 fputs (hi_reg_name[REGNO (x)], file);
7758 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
7760 fputs (qi_reg_name[REGNO (x)], file);
7763 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
7765 fputs (qi_high_reg_name[REGNO (x)], file);
7772 /* Locate some local-dynamic symbol still in use by this function
7773 so that we can print its name in some tls_local_dynamic_base pattern. */
7777 get_some_local_dynamic_name (void)
7781 if (cfun->machine->some_ld_name)
7782 return cfun->machine->some_ld_name;
7784 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
7786 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
7787 return cfun->machine->some_ld_name;
7793 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
7797 if (GET_CODE (x) == SYMBOL_REF
7798 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
7800 cfun->machine->some_ld_name = XSTR (x, 0);
7808 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7809 C -- print opcode suffix for set/cmov insn.
7810 c -- like C, but print reversed condition
7811 F,f -- likewise, but for floating-point.
7812 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7814 R -- print the prefix for register names.
7815 z -- print the opcode suffix for the size of the current operand.
7816 * -- print a star (in certain assembler syntax)
7817 A -- print an absolute memory reference.
7818 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7819 s -- print a shift double count, followed by the assembler's argument delimiter.
7821 b -- print the QImode name of the register for the indicated operand.
7822 %b0 would print %al if operands[0] is reg 0.
7823 w -- likewise, print the HImode name of the register.
7824 k -- likewise, print the SImode name of the register.
7825 q -- likewise, print the DImode name of the register.
7826 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7827 y -- print "st(0)" instead of "st" as a register.
7828 D -- print condition for SSE cmp instruction.
7829 P -- if PIC, print an @PLT suffix.
7830 X -- don't print any sort of PIC '@' suffix for a symbol.
7831 & -- print some in-use local-dynamic symbol name.
7832 H -- print a memory address offset by 8; used for sse high-parts
7836 print_operand (FILE *file, rtx x, int code)
7843 if (ASSEMBLER_DIALECT == ASM_ATT)
7848 assemble_name (file, get_some_local_dynamic_name ());
7852 switch (ASSEMBLER_DIALECT)
7859 /* Intel syntax. For absolute addresses, registers should not
7860 be surrounded by braces. */
7861 if (GET_CODE (x) != REG)
7864 PRINT_OPERAND (file, x, 0);
7874 PRINT_OPERAND (file, x, 0);
7879 if (ASSEMBLER_DIALECT == ASM_ATT)
7884 if (ASSEMBLER_DIALECT == ASM_ATT)
7889 if (ASSEMBLER_DIALECT == ASM_ATT)
7894 if (ASSEMBLER_DIALECT == ASM_ATT)
7899 if (ASSEMBLER_DIALECT == ASM_ATT)
7904 if (ASSEMBLER_DIALECT == ASM_ATT)
7909 /* 387 opcodes don't get size suffixes if the operands are registers. */
7911 if (STACK_REG_P (x))
7914 /* Likewise if using Intel opcodes. */
7915 if (ASSEMBLER_DIALECT == ASM_INTEL)
7918 /* This is the size of op from size of operand. */
7919 switch (GET_MODE_SIZE (GET_MODE (x)))
7926 #ifdef HAVE_GAS_FILDS_FISTS
7932 if (GET_MODE (x) == SFmode)
7947 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
7949 #ifdef GAS_MNEMONICS
7975 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
7977 PRINT_OPERAND (file, x, 0);
7983 /* A little bit of braindamage here. The SSE compare instructions
7984 use completely different names for the comparisons than the
7985 fp conditional moves do. */
7986 switch (GET_CODE (x))
8001 fputs ("unord", file);
8005 fputs ("neq", file);
8009 fputs ("nlt", file);
8013 fputs ("nle", file);
8016 fputs ("ord", file);
8023 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8024 if (ASSEMBLER_DIALECT == ASM_ATT)
8026 switch (GET_MODE (x))
8028 case HImode: putc ('w', file); break;
8030 case SFmode: putc ('l', file); break;
8032 case DFmode: putc ('q', file); break;
8033 default: gcc_unreachable ();
8040 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
8043 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8044 if (ASSEMBLER_DIALECT == ASM_ATT)
8047 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
8050 /* Like above, but reverse condition */
8052 /* Check to see if argument to %c is really a constant
8053 and not a condition code which needs to be reversed. */
8054 if (!COMPARISON_P (x))
8056 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
8059 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
8062 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8063 if (ASSEMBLER_DIALECT == ASM_ATT)
8066 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
8070 /* It doesn't actually matter what mode we use here, as we're
8071 only going to use this for printing. */
8072 x = adjust_address_nv (x, DImode, 8);
8079 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
8082 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
8085 int pred_val = INTVAL (XEXP (x, 0));
8087 if (pred_val < REG_BR_PROB_BASE * 45 / 100
8088 || pred_val > REG_BR_PROB_BASE * 55 / 100)
8090 int taken = pred_val > REG_BR_PROB_BASE / 2;
8091 int cputaken = final_forward_branch_p (current_output_insn) == 0;
8093 /* Emit hints only in the case default branch prediction
8094 heuristics would fail. */
8095 if (taken != cputaken)
8097 /* We use 3e (DS) prefix for taken branches and
8098 2e (CS) prefix for not taken branches. */
8100 fputs ("ds ; ", file);
8102 fputs ("cs ; ", file);
8109 output_operand_lossage ("invalid operand code '%c'", code);
8113 if (GET_CODE (x) == REG)
8114 print_reg (x, code, file);
8116 else if (GET_CODE (x) == MEM)
8118 /* No `byte ptr' prefix for call instructions. */
8119 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
8122 switch (GET_MODE_SIZE (GET_MODE (x)))
8124 case 1: size = "BYTE"; break;
8125 case 2: size = "WORD"; break;
8126 case 4: size = "DWORD"; break;
8127 case 8: size = "QWORD"; break;
8128 case 12: size = "XWORD"; break;
8129 case 16: size = "XMMWORD"; break;
8134 /* Check for explicit size override (codes 'b', 'w' and 'k') */
8137 else if (code == 'w')
8139 else if (code == 'k')
8143 fputs (" PTR ", file);
8147 /* Avoid (%rip) for call operands. */
8148 if (CONSTANT_ADDRESS_P (x) && code == 'P'
8149 && GET_CODE (x) != CONST_INT)
8150 output_addr_const (file, x);
8151 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
8152 output_operand_lossage ("invalid constraints for operand");
8157 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
8162 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8163 REAL_VALUE_TO_TARGET_SINGLE (r, l);
8165 if (ASSEMBLER_DIALECT == ASM_ATT)
8167 fprintf (file, "0x%08lx", l);
8170 /* These float cases don't actually occur as immediate operands. */
8171 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
8175 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8176 fprintf (file, "%s", dstr);
8179 else if (GET_CODE (x) == CONST_DOUBLE
8180 && GET_MODE (x) == XFmode)
8184 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8185 fprintf (file, "%s", dstr);
8190 /* We have patterns that allow zero sets of memory, for instance.
8191 In 64-bit mode, we should probably support all 8-byte vectors,
8192 since we can in fact encode that into an immediate. */
8193 if (GET_CODE (x) == CONST_VECTOR)
8195 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
8201 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
8203 if (ASSEMBLER_DIALECT == ASM_ATT)
8206 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
8207 || GET_CODE (x) == LABEL_REF)
8209 if (ASSEMBLER_DIALECT == ASM_ATT)
8212 fputs ("OFFSET FLAT:", file);
8215 if (GET_CODE (x) == CONST_INT)
8216 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
8218 output_pic_addr_const (file, x, code);
8220 output_addr_const (file, x);
8224 /* Print a memory operand whose address is ADDR. */
8227 print_operand_address (FILE *file, rtx addr)
8229 struct ix86_address parts;
8230 rtx base, index, disp;
8232 int ok = ix86_decompose_address (addr, &parts);
8237 index = parts.index;
8239 scale = parts.scale;
8247 if (USER_LABEL_PREFIX[0] == 0)
8249 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
8255 if (!base && !index)
8257 /* A displacement-only address requires special attention. */
8259 if (GET_CODE (disp) == CONST_INT)
8261 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
8263 if (USER_LABEL_PREFIX[0] == 0)
8265 fputs ("ds:", file);
8267 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
8270 output_pic_addr_const (file, disp, 0);
8272 output_addr_const (file, disp);
8274 /* Use the one-byte-shorter RIP-relative addressing in 64bit mode. */
8277 if (GET_CODE (disp) == CONST
8278 && GET_CODE (XEXP (disp, 0)) == PLUS
8279 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8280 disp = XEXP (XEXP (disp, 0), 0);
8281 if (GET_CODE (disp) == LABEL_REF
8282 || (GET_CODE (disp) == SYMBOL_REF
8283 && SYMBOL_REF_TLS_MODEL (disp) == 0))
8284 fputs ("(%rip)", file);
8289 if (ASSEMBLER_DIALECT == ASM_ATT)
8294 output_pic_addr_const (file, disp, 0);
8295 else if (GET_CODE (disp) == LABEL_REF)
8296 output_asm_label (disp);
8298 output_addr_const (file, disp);
8303 print_reg (base, 0, file);
8307 print_reg (index, 0, file);
8309 fprintf (file, ",%d", scale);
8315 rtx offset = NULL_RTX;
8319 /* Pull out the offset of a symbol; print any symbol itself. */
8320 if (GET_CODE (disp) == CONST
8321 && GET_CODE (XEXP (disp, 0)) == PLUS
8322 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8324 offset = XEXP (XEXP (disp, 0), 1);
8325 disp = gen_rtx_CONST (VOIDmode,
8326 XEXP (XEXP (disp, 0), 0));
8330 output_pic_addr_const (file, disp, 0);
8331 else if (GET_CODE (disp) == LABEL_REF)
8332 output_asm_label (disp);
8333 else if (GET_CODE (disp) == CONST_INT)
8336 output_addr_const (file, disp);
8342 print_reg (base, 0, file);
8345 if (INTVAL (offset) >= 0)
8347 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8351 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8358 print_reg (index, 0, file);
8360 fprintf (file, "*%d", scale);
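
/* Illustrative note (an addition, not from the original sources): for a
   full base + index*scale + displacement address, the two dialects
   handled above print the same operand as, e.g. (registers and offsets
   here are hypothetical):

	AT&T:	-4(%ebp,%ecx,4)
	Intel:	[ebp+ecx*4-4]

   with the scale rendered as ",4" by the first fprintf and as "*4" by
   the second.  */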
8368 output_addr_const_extra (FILE *file, rtx x)
8372 if (GET_CODE (x) != UNSPEC)
8375 op = XVECEXP (x, 0, 0);
8376 switch (XINT (x, 1))
8378 case UNSPEC_GOTTPOFF:
8379 output_addr_const (file, op);
8380 /* FIXME: This might be @TPOFF in Sun ld. */
8381 fputs ("@GOTTPOFF", file);
8384 output_addr_const (file, op);
8385 fputs ("@TPOFF", file);
8388 output_addr_const (file, op);
8390 fputs ("@TPOFF", file);
8392 fputs ("@NTPOFF", file);
8395 output_addr_const (file, op);
8396 fputs ("@DTPOFF", file);
8398 case UNSPEC_GOTNTPOFF:
8399 output_addr_const (file, op);
8401 fputs ("@GOTTPOFF(%rip)", file);
8403 fputs ("@GOTNTPOFF", file);
8405 case UNSPEC_INDNTPOFF:
8406 output_addr_const (file, op);
8407 fputs ("@INDNTPOFF", file);
8417 /* Split one or more DImode RTL references into pairs of SImode
8418 references. The RTL can be REG, offsettable MEM, integer constant, or
8419 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8420 split and "num" is its length. lo_half and hi_half are output arrays
8421 that parallel "operands". */
8424 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8428 rtx op = operands[num];
8430 /* simplify_subreg refuses to split volatile memory references,
8431 but we still have to handle them. */
8432 if (GET_CODE (op) == MEM)
8434 lo_half[num] = adjust_address (op, SImode, 0);
8435 hi_half[num] = adjust_address (op, SImode, 4);
8439 lo_half[num] = simplify_gen_subreg (SImode, op,
8440 GET_MODE (op) == VOIDmode
8441 ? DImode : GET_MODE (op), 0);
8442 hi_half[num] = simplify_gen_subreg (SImode, op,
8443 GET_MODE (op) == VOIDmode
8444 ? DImode : GET_MODE (op), 4);
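
/* Usage sketch (an illustration, not code from this file): a typical
   caller, e.g. a post-reload splitter for a DImode move, might do

	rtx lo[2], hi[2];
	split_di (operands, 2, lo, hi);
	emit_move_insn (lo[0], lo[1]);
	emit_move_insn (hi[0], hi[1]);

   turning one 64-bit move into two SImode moves.  */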
8448 /* Split one or more TImode RTL references into pairs of DImode
8449 references. The RTL can be REG, offsettable MEM, integer constant, or
8450 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
8451 split and "num" is its length. lo_half and hi_half are output arrays
8452 that parallel "operands". */
8455 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8459 rtx op = operands[num];
8461 /* simplify_subreg refuses to split volatile memory references, but we
8462 still have to handle them. */
8463 if (GET_CODE (op) == MEM)
8465 lo_half[num] = adjust_address (op, DImode, 0);
8466 hi_half[num] = adjust_address (op, DImode, 8);
8470 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
8471 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
8476 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
8477 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
8478 is the expression of the binary operation. The output may either be
8479 emitted here, or returned to the caller, like all output_* functions.
8481 There is no guarantee that the operands are the same mode, as they
8482 might be within FLOAT or FLOAT_EXTEND expressions. */
8484 #ifndef SYSV386_COMPAT
8485 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
8486 wants to fix the assemblers because that causes incompatibility
8487 with gcc. No-one wants to fix gcc because that causes
8488 incompatibility with assemblers... You can use the option of
8489 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
8490 #define SYSV386_COMPAT 1
8494 output_387_binary_op (rtx insn, rtx *operands)
8496 static char buf[30];
8499 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
8501 #ifdef ENABLE_CHECKING
8502 /* Even if we do not want to check the inputs, this documents the input
8503 constraints, which helps in understanding the following code. */
8504 if (STACK_REG_P (operands[0])
8505 && ((REG_P (operands[1])
8506 && REGNO (operands[0]) == REGNO (operands[1])
8507 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
8508 || (REG_P (operands[2])
8509 && REGNO (operands[0]) == REGNO (operands[2])
8510 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
8511 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
8514 gcc_assert (is_sse);
8517 switch (GET_CODE (operands[3]))
8520 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8521 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8529 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8530 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8538 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8539 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8547 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8548 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8562 if (GET_MODE (operands[0]) == SFmode)
8563 strcat (buf, "ss\t{%2, %0|%0, %2}");
8565 strcat (buf, "sd\t{%2, %0|%0, %2}");
8570 switch (GET_CODE (operands[3]))
8574 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
8576 rtx temp = operands[2];
8577 operands[2] = operands[1];
8581 /* Now we know operands[0] == operands[1]. */
8583 if (GET_CODE (operands[2]) == MEM)
8589 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8591 if (STACK_TOP_P (operands[0]))
8592 /* How is it that we are storing to a dead operand[2]?
8593 Well, presumably operands[1] is dead too. We can't
8594 store the result to st(0) as st(0) gets popped on this
8595 instruction. Instead store to operands[2] (which I
8596 think has to be st(1)). st(1) will be popped later.
8597 gcc <= 2.8.1 didn't have this check and generated
8598 assembly code that the Unixware assembler rejected. */
8599 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8601 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8605 if (STACK_TOP_P (operands[0]))
8606 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8608 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8613 if (GET_CODE (operands[1]) == MEM)
8619 if (GET_CODE (operands[2]) == MEM)
8625 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8628 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
8629 derived assemblers, confusingly reverse the direction of
8630 the operation for fsub{r} and fdiv{r} when the
8631 destination register is not st(0). The Intel assembler
8632 doesn't have this brain damage. Read !SYSV386_COMPAT to
8633 figure out what the hardware really does. */
8634 if (STACK_TOP_P (operands[0]))
8635 p = "{p\t%0, %2|rp\t%2, %0}";
8637 p = "{rp\t%2, %0|p\t%0, %2}";
8639 if (STACK_TOP_P (operands[0]))
8640 /* As above for fmul/fadd, we can't store to st(0). */
8641 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8643 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8648 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
8651 if (STACK_TOP_P (operands[0]))
8652 p = "{rp\t%0, %1|p\t%1, %0}";
8654 p = "{p\t%1, %0|rp\t%0, %1}";
8656 if (STACK_TOP_P (operands[0]))
8657 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
8659 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
8664 if (STACK_TOP_P (operands[0]))
8666 if (STACK_TOP_P (operands[1]))
8667 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8669 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
8672 else if (STACK_TOP_P (operands[1]))
8675 p = "{\t%1, %0|r\t%0, %1}";
8677 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
8683 p = "{r\t%2, %0|\t%0, %2}";
8685 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8698 /* Return needed mode for entity in optimize_mode_switching pass. */
8701 ix86_mode_needed (int entity, rtx insn)
8703 enum attr_i387_cw mode;
8705 /* The mode UNINITIALIZED is used to store the control word after a
8706 function call or ASM pattern. The mode ANY specifies that the
8707 function has no requirements on the control word and makes no changes
8708 to the bits we are interested in. */
8711 || (NONJUMP_INSN_P (insn)
8712 && (asm_noperands (PATTERN (insn)) >= 0
8713 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
8714 return I387_CW_UNINITIALIZED;
8716 if (recog_memoized (insn) < 0)
8719 mode = get_attr_i387_cw (insn);
8724 if (mode == I387_CW_TRUNC)
8729 if (mode == I387_CW_FLOOR)
8734 if (mode == I387_CW_CEIL)
8739 if (mode == I387_CW_MASK_PM)
8750 /* Output code to initialize control word copies used by trunc?f?i and
8751 rounding patterns. CURRENT_MODE is set to the current control word,
8752 while NEW_MODE is set to the new control word. */
8755 emit_i387_cw_initialization (int mode)
8757 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
8762 rtx reg = gen_reg_rtx (HImode);
8764 emit_insn (gen_x86_fnstcw_1 (stored_mode));
8765 emit_move_insn (reg, copy_rtx (stored_mode));
8767 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
8772 /* round toward zero (truncate) */
8773 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
8774 slot = SLOT_CW_TRUNC;
8778 /* round down toward -oo */
8779 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8780 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
8781 slot = SLOT_CW_FLOOR;
8785 /* round up toward +oo */
8786 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8787 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
8788 slot = SLOT_CW_CEIL;
8791 case I387_CW_MASK_PM:
8792 /* mask precision exception for nearbyint() */
8793 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8794 slot = SLOT_CW_MASK_PM;
8806 /* round toward zero (truncate) */
8807 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8808 slot = SLOT_CW_TRUNC;
8812 /* round down toward -oo */
8813 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
8814 slot = SLOT_CW_FLOOR;
8818 /* round up toward +oo */
8819 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
8820 slot = SLOT_CW_CEIL;
8823 case I387_CW_MASK_PM:
8824 /* mask precision exception for nearbyint() */
8825 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8826 slot = SLOT_CW_MASK_PM;
8834 gcc_assert (slot < MAX_386_STACK_LOCALS);
8836 new_mode = assign_386_stack_local (HImode, slot);
8837 emit_move_insn (new_mode, reg);
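
/* Reference for the magic numbers above (added note): in the i387
   control word, bits 10-11 form the rounding-control (RC) field and
   bit 5 is the precision-exception mask:

	0x0400	RC = 01, round down toward -oo
	0x0800	RC = 10, round up toward +oo
	0x0c00	RC = 11, round toward zero (truncate)
	0x0020	PM, mask the precision exception

   The insv-based path stores the high byte of the control word
   directly, so the same RC encodings appear there as 0x4, 0x8 and
   0xc.  */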
8840 /* Output code for INSN to convert a float to a signed int. OPERANDS
8841 are the insn operands. The output may be [HSD]Imode and the input
8842 operand may be [SDX]Fmode. */
8845 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
8847 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8848 int dimode_p = GET_MODE (operands[0]) == DImode;
8849 int round_mode = get_attr_i387_cw (insn);
8851 /* Jump through a hoop or two for DImode, since the hardware has no
8852 non-popping instruction. We used to do this a different way, but
8853 that was somewhat fragile and broke with post-reload splitters. */
8854 if ((dimode_p || fisttp) && !stack_top_dies)
8855 output_asm_insn ("fld\t%y1", operands);
8857 gcc_assert (STACK_TOP_P (operands[1]));
8858 gcc_assert (GET_CODE (operands[0]) == MEM);
8861 output_asm_insn ("fisttp%z0\t%0", operands);
8864 if (round_mode != I387_CW_ANY)
8865 output_asm_insn ("fldcw\t%3", operands);
8866 if (stack_top_dies || dimode_p)
8867 output_asm_insn ("fistp%z0\t%0", operands);
8869 output_asm_insn ("fist%z0\t%0", operands);
8870 if (round_mode != I387_CW_ANY)
8871 output_asm_insn ("fldcw\t%2", operands);
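
/* Example (illustration only): for a DImode result on the 387, the
   logic above emits roughly

	fld	%st(0)		# duplicate st(0); fistp will pop
	fldcw	%3		# switch to the truncating control word
	fistpll	%0		# convert, store and pop
	fldcw	%2		# restore the original control word

   where %2 and %3 name the stack slots prepared by
   emit_i387_cw_initialization.  */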
8877 /* Output code for x87 ffreep insn. The OPNO argument, which may only
8878 have the values zero or one, indicates the ffreep insn's operand
8879 from the OPERANDS array. */
8882 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
8884 if (TARGET_USE_FFREEP)
8885 #if HAVE_AS_IX86_FFREEP
8886 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
8889 static char retval[] = ".word\t0xc_df";
8890 int regno = REGNO (operands[opno]);
8892 gcc_assert (FP_REGNO_P (regno));
8894 retval[9] = '0' + (regno - FIRST_STACK_REG);
8899 return opno ? "fstp\t%y1" : "fstp\t%y0";
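
/* Added note: when the assembler lacks the mnemonic, the fallback above
   hand-assembles ffreep, which encodes as the bytes 0xdf 0xc0+i for
   %st(i).  Since x86 is little-endian, ".word 0xc0df" (with the digit
   patched into retval[9]) emits exactly those two bytes for %st(0).  */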
8903 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
8904 should be used. UNORDERED_P is true when fucom should be used. */
8907 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
8910 rtx cmp_op0, cmp_op1;
8911 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
8915 cmp_op0 = operands[0];
8916 cmp_op1 = operands[1];
8920 cmp_op0 = operands[1];
8921 cmp_op1 = operands[2];
8926 if (GET_MODE (operands[0]) == SFmode)
8928 return "ucomiss\t{%1, %0|%0, %1}";
8930 return "comiss\t{%1, %0|%0, %1}";
8933 return "ucomisd\t{%1, %0|%0, %1}";
8935 return "comisd\t{%1, %0|%0, %1}";
8938 gcc_assert (STACK_TOP_P (cmp_op0));
8940 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8942 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
8946 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
8947 return output_387_ffreep (operands, 1);
8950 return "ftst\n\tfnstsw\t%0";
8953 if (STACK_REG_P (cmp_op1)
8955 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
8956 && REGNO (cmp_op1) != FIRST_STACK_REG)
8958 /* If both the top of the 387 stack and the other operand (also a
8959 stack register) die, then this must be a `fcompp' float
8960 compare. */
8964 /* There is no double popping fcomi variant. Fortunately,
8965 eflags is immune from the fstp's cc clobbering. */
8967 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
8969 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
8970 return output_387_ffreep (operands, 0);
8975 return "fucompp\n\tfnstsw\t%0";
8977 return "fcompp\n\tfnstsw\t%0";
8982 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
8984 static const char * const alt[16] =
8986 "fcom%z2\t%y2\n\tfnstsw\t%0",
8987 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8988 "fucom%z2\t%y2\n\tfnstsw\t%0",
8989 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8991 "ficom%z2\t%y2\n\tfnstsw\t%0",
8992 "ficomp%z2\t%y2\n\tfnstsw\t%0",
8996 "fcomi\t{%y1, %0|%0, %y1}",
8997 "fcomip\t{%y1, %0|%0, %y1}",
8998 "fucomi\t{%y1, %0|%0, %y1}",
8999 "fucomip\t{%y1, %0|%0, %y1}",
9010 mask = eflags_p << 3;
9011 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
9012 mask |= unordered_p << 1;
9013 mask |= stack_top_dies;
9015 gcc_assert (mask < 16);
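
/* Worked check of the encoding (added): a dying top-of-stack fcomi
   compare with unordered semantics gives
   mask = (1 << 3) | (0 << 2) | (1 << 1) | 1 = 11,
   which indeed selects "fucomip" in the table above.  */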
9024 ix86_output_addr_vec_elt (FILE *file, int value)
9026 const char *directive = ASM_LONG;
9030 directive = ASM_QUAD;
9032 gcc_assert (!TARGET_64BIT);
9035 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
9039 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
9042 fprintf (file, "%s%s%d-%s%d\n",
9043 ASM_LONG, LPREFIX, value, LPREFIX, rel);
9044 else if (HAVE_AS_GOTOFF_IN_DATA)
9045 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
9047 else if (TARGET_MACHO)
9049 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
9050 machopic_output_function_base_name (file);
9051 fprintf(file, "\n");
9055 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
9056 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
9059 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
9063 ix86_expand_clear (rtx dest)
9067 /* We play register width games, which are only valid after reload. */
9068 gcc_assert (reload_completed);
9070 /* Avoid HImode and its attendant prefix byte. */
9071 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
9072 dest = gen_rtx_REG (SImode, REGNO (dest));
9074 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
9076 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
9077 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
9079 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9080 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
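
/* Added size note: the two alternatives being chosen between are, for
   example with %eax as the destination,

	movl	$0, %eax	# 5 bytes, leaves EFLAGS intact
	xorl	%eax, %eax	# 2 bytes, clobbers EFLAGS

   which is why the xor form above is wrapped in a PARALLEL with a
   flags clobber.  */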
9086 /* X is an unchanging MEM. If it is a constant pool reference, return
9087 the constant pool rtx, else NULL. */
9090 maybe_get_pool_constant (rtx x)
9092 x = ix86_delegitimize_address (XEXP (x, 0));
9094 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
9095 return get_pool_constant (x);
9101 ix86_expand_move (enum machine_mode mode, rtx operands[])
9103 int strict = (reload_in_progress || reload_completed);
9105 enum tls_model model;
9110 if (GET_CODE (op1) == SYMBOL_REF)
9112 model = SYMBOL_REF_TLS_MODEL (op1);
9115 op1 = legitimize_tls_address (op1, model, true);
9116 op1 = force_operand (op1, op0);
9121 else if (GET_CODE (op1) == CONST
9122 && GET_CODE (XEXP (op1, 0)) == PLUS
9123 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
9125 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
9128 rtx addend = XEXP (XEXP (op1, 0), 1);
9129 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
9130 op1 = force_operand (op1, NULL);
9131 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
9132 op0, 1, OPTAB_DIRECT);
9138 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
9140 if (TARGET_MACHO && !TARGET_64BIT)
9145 rtx temp = ((reload_in_progress
9146 || ((op0 && GET_CODE (op0) == REG)
9148 ? op0 : gen_reg_rtx (Pmode));
9149 op1 = machopic_indirect_data_reference (op1, temp);
9150 op1 = machopic_legitimize_pic_address (op1, mode,
9151 temp == op1 ? 0 : temp);
9153 else if (MACHOPIC_INDIRECT)
9154 op1 = machopic_indirect_data_reference (op1, 0);
9161 if (GET_CODE (op0) == MEM)
9162 op1 = force_reg (Pmode, op1);
9164 op1 = legitimize_address (op1, op1, Pmode);
9169 if (GET_CODE (op0) == MEM
9170 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
9171 || !push_operand (op0, mode))
9172 && GET_CODE (op1) == MEM)
9173 op1 = force_reg (mode, op1);
9175 if (push_operand (op0, mode)
9176 && ! general_no_elim_operand (op1, mode))
9177 op1 = copy_to_mode_reg (mode, op1);
9179 /* Force large constants in 64-bit compilation into registers
9180 to get them CSEed. */
9181 if (TARGET_64BIT && mode == DImode
9182 && immediate_operand (op1, mode)
9183 && !x86_64_zext_immediate_operand (op1, VOIDmode)
9184 && !register_operand (op0, mode)
9185 && optimize && !reload_completed && !reload_in_progress)
9186 op1 = copy_to_mode_reg (mode, op1);
9188 if (FLOAT_MODE_P (mode))
9190 /* If we are loading a floating point constant to a register,
9191 force the value to memory now, since we'll get better code
9192 out of the back end. */
9196 else if (GET_CODE (op1) == CONST_DOUBLE)
9198 op1 = validize_mem (force_const_mem (mode, op1));
9199 if (!register_operand (op0, mode))
9201 rtx temp = gen_reg_rtx (mode);
9202 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
9203 emit_move_insn (op0, temp);
9210 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9214 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
9216 rtx op0 = operands[0], op1 = operands[1];
9218 /* Force constants other than zero into memory. We do not know how
9219 the instructions used to build constants modify the upper 64 bits
9220 of the register; once we have that information, we may be able
9221 to handle some of them more efficiently. */
9222 if ((reload_in_progress | reload_completed) == 0
9223 && register_operand (op0, mode)
9225 && standard_sse_constant_p (op1) <= 0)
9226 op1 = validize_mem (force_const_mem (mode, op1));
9228 /* Make operand1 a register if it isn't already. */
9230 && !register_operand (op0, mode)
9231 && !register_operand (op1, mode))
9233 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
9237 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9240 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
9241 straight to ix86_expand_vector_move. */
9244 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
9253 /* If we're optimizing for size, movups is the smallest. */
9256 op0 = gen_lowpart (V4SFmode, op0);
9257 op1 = gen_lowpart (V4SFmode, op1);
9258 emit_insn (gen_sse_movups (op0, op1));
9262 /* ??? If we have typed data, then it would appear that using
9263 movdqu is the only way to get unaligned data loaded with integrity. */
9265 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9267 op0 = gen_lowpart (V16QImode, op0);
9268 op1 = gen_lowpart (V16QImode, op1);
9269 emit_insn (gen_sse2_movdqu (op0, op1));
9273 if (TARGET_SSE2 && mode == V2DFmode)
9277 /* When SSE registers are split into halves, we can avoid
9278 writing to the top half twice. */
9279 if (TARGET_SSE_SPLIT_REGS)
9281 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9286 /* ??? Not sure about the best option for the Intel chips.
9287 The following would seem to satisfy; the register is
9288 entirely cleared, breaking the dependency chain. We
9289 then store to the upper half, with a dependency depth
9290 of one. A rumor has it that Intel recommends two movsd
9291 followed by an unpacklpd, but this is unconfirmed. And
9292 given that the dependency depth of the unpacklpd would
9293 still be one, I'm not sure why this would be better. */
9294 zero = CONST0_RTX (V2DFmode);
9297 m = adjust_address (op1, DFmode, 0);
9298 emit_insn (gen_sse2_loadlpd (op0, zero, m));
9299 m = adjust_address (op1, DFmode, 8);
9300 emit_insn (gen_sse2_loadhpd (op0, op0, m));
9304 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
9305 emit_move_insn (op0, CONST0_RTX (mode));
9307 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9309 if (mode != V4SFmode)
9310 op0 = gen_lowpart (V4SFmode, op0);
9311 m = adjust_address (op1, V2SFmode, 0);
9312 emit_insn (gen_sse_loadlps (op0, op0, m));
9313 m = adjust_address (op1, V2SFmode, 8);
9314 emit_insn (gen_sse_loadhps (op0, op0, m));
9317 else if (MEM_P (op0))
9319 /* If we're optimizing for size, movups is the smallest. */
9322 op0 = gen_lowpart (V4SFmode, op0);
9323 op1 = gen_lowpart (V4SFmode, op1);
9324 emit_insn (gen_sse_movups (op0, op1));
9328 /* ??? Similar to above, only less clear because of "typeless
9329 stores". */
9330 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
9331 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9333 op0 = gen_lowpart (V16QImode, op0);
9334 op1 = gen_lowpart (V16QImode, op1);
9335 emit_insn (gen_sse2_movdqu (op0, op1));
9339 if (TARGET_SSE2 && mode == V2DFmode)
9341 m = adjust_address (op0, DFmode, 0);
9342 emit_insn (gen_sse2_storelpd (m, op1));
9343 m = adjust_address (op0, DFmode, 8);
9344 emit_insn (gen_sse2_storehpd (m, op1));
9348 if (mode != V4SFmode)
9349 op1 = gen_lowpart (V4SFmode, op1);
9350 m = adjust_address (op0, V2SFmode, 0);
9351 emit_insn (gen_sse_storelps (m, op1));
9352 m = adjust_address (op0, V2SFmode, 8);
9353 emit_insn (gen_sse_storehps (m, op1));
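
/* Summary illustration (added note): the strategies above amount to,
   for an unaligned 16-byte load in AT&T syntax,

	movups	(%eax), %xmm0				# smallest form
	movdqu	(%eax), %xmm0				# SSE2 integer data
	movsd	(%eax), %xmm0; movhpd 8(%eax), %xmm0	# split V2DF
	movlps	(%eax), %xmm0; movhps 8(%eax), %xmm0	# split V4SF

   (%eax here is a hypothetical address register), with the split forms
   optionally preceded by a clear or clobber of the destination to
   break the false dependency on its previous contents.  */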
9360 /* Expand a push in MODE. This is some mode for which we do not support
9361 proper push instructions, at least from the registers that we expect
9362 the value to live in. */
9365 ix86_expand_push (enum machine_mode mode, rtx x)
9369 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
9370 GEN_INT (-GET_MODE_SIZE (mode)),
9371 stack_pointer_rtx, 1, OPTAB_DIRECT);
9372 if (tmp != stack_pointer_rtx)
9373 emit_move_insn (stack_pointer_rtx, tmp);
9375 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
9376 emit_move_insn (tmp, x);
9379 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
9380 destination to use for the operation. If different from the true
9381 destination in operands[0], a copy operation will be required. */
9384 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
9387 int matching_memory;
9388 rtx src1, src2, dst;
9394 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
9395 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9396 && (rtx_equal_p (dst, src2)
9397 || immediate_operand (src1, mode)))
9404 /* If the destination is memory, and we do not have matching source
9405 operands, do things in registers. */
9406 matching_memory = 0;
9407 if (GET_CODE (dst) == MEM)
9409 if (rtx_equal_p (dst, src1))
9410 matching_memory = 1;
9411 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9412 && rtx_equal_p (dst, src2))
9413 matching_memory = 2;
9415 dst = gen_reg_rtx (mode);
9418 /* The two source operands cannot both be in memory. */
9419 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
9421 if (matching_memory != 2)
9422 src2 = force_reg (mode, src2);
9424 src1 = force_reg (mode, src1);
9427 /* If the operation is not commutative, source 1 cannot be a constant
9428 or non-matching memory. */
9429 if ((CONSTANT_P (src1)
9430 || (!matching_memory && GET_CODE (src1) == MEM))
9431 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9432 src1 = force_reg (mode, src1);
9434 src1 = operands[1] = src1;
9435 src2 = operands[2] = src2;
9439 /* Similarly, but assume that the destination has already been set up properly. */
9443 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
9444 enum machine_mode mode, rtx operands[])
9446 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
9447 gcc_assert (dst == operands[0]);
9450 /* Attempt to expand a binary operator. Make the expansion closer to the
9451 actual machine than just general_operand, which would allow 3 separate
9452 memory references (one output, two input) in a single insn. */
9455 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
9458 rtx src1, src2, dst, op, clob;
9460 dst = ix86_fixup_binary_operands (code, mode, operands);
9464 /* Emit the instruction. */
9466 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
9467 if (reload_in_progress)
9469 /* Reload doesn't know about the flags register, and doesn't know that
9470 it doesn't want to clobber it. We can only do this with PLUS. */
9471 gcc_assert (code == PLUS);
9476 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9477 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9480 /* Fix up the destination if needed. */
9481 if (dst != operands[0])
9482 emit_move_insn (operands[0], dst);
9485 /* Return TRUE or FALSE depending on whether the binary operator meets the
9486 appropriate constraints. */
9489 ix86_binary_operator_ok (enum rtx_code code,
9490 enum machine_mode mode ATTRIBUTE_UNUSED,
9493 /* The two source operands cannot both be in memory. */
9494 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
9496 /* If the operation is not commutative, source 1 cannot be a constant. */
9497 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9499 /* If the destination is memory, we must have a matching source operand. */
9500 if (GET_CODE (operands[0]) == MEM
9501 && ! (rtx_equal_p (operands[0], operands[1])
9502 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9503 && rtx_equal_p (operands[0], operands[2]))))
9505 /* If the operation is not commutative and source 1 is memory, we must
9506 have a matching destination. */
9507 if (GET_CODE (operands[1]) == MEM
9508 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
9509 && ! rtx_equal_p (operands[0], operands[1]))
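
/* Examples (added, with hypothetical operands): the rules above accept
   forms such as

	a = a + mem		// dst matches src1
	a = mem + a		// commutative, dst matches src2
	mem = mem + reg		// memory dst with matching src1

   and reject, e.g., "mem1 = mem2 + reg" (memory destination without a
   matching source) or "a = 5 - a" (constant as source 1 of a
   non-commutative operation).  */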
9514 /* Attempt to expand a unary operator. Make the expansion closer to the
9515 actual machine than just general_operand, which would allow 2 separate
9516 memory references (one output, one input) in a single insn. */
9519 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
9522 int matching_memory;
9523 rtx src, dst, op, clob;
9528 /* If the destination is memory, and we do not have matching source
9529 operands, do things in registers. */
9530 matching_memory = 0;
9533 if (rtx_equal_p (dst, src))
9534 matching_memory = 1;
9536 dst = gen_reg_rtx (mode);
9539 /* When source operand is memory, destination must match. */
9540 if (MEM_P (src) && !matching_memory)
9541 src = force_reg (mode, src);
9543 /* Emit the instruction. */
9545 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
9546 if (reload_in_progress || code == NOT)
9548 /* Reload doesn't know about the flags register, and doesn't know that
9549 it doesn't want to clobber it. */
9550 gcc_assert (code == NOT);
9555 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9556 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9559 /* Fix up the destination if needed. */
9560 if (dst != operands[0])
9561 emit_move_insn (operands[0], dst);
9564 /* Return TRUE or FALSE depending on whether the unary operator meets the
9565 appropriate constraints. */
9568 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
9569 enum machine_mode mode ATTRIBUTE_UNUSED,
9570 rtx operands[2] ATTRIBUTE_UNUSED)
9572 /* If one of the operands is in memory, the source and destination must match. */
9573 if ((GET_CODE (operands[0]) == MEM
9574 || GET_CODE (operands[1]) == MEM)
9575 && ! rtx_equal_p (operands[0], operands[1]))
9580 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
9581 Create a mask for the sign bit in MODE for an SSE register. If VECT is
9582 true, then replicate the mask for all elements of the vector register.
9583 If INVERT is true, then create a mask excluding the sign bit. */
9586 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
9588 enum machine_mode vec_mode;
9589 HOST_WIDE_INT hi, lo;
9594 /* Find the sign bit, sign extended to 2*HWI. */
9596 lo = 0x80000000, hi = lo < 0;
9597 else if (HOST_BITS_PER_WIDE_INT >= 64)
9598 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
9600 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
9605 /* Force this value into the low part of a fp vector constant. */
9606 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
9607 mask = gen_lowpart (mode, mask);
9612 v = gen_rtvec (4, mask, mask, mask, mask);
9614 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
9615 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9616 vec_mode = V4SFmode;
9621 v = gen_rtvec (2, mask, mask);
9623 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
9624 vec_mode = V2DFmode;
9627 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
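
/* Concrete values (added note): the per-element mask built above is
   0x80000000 for SFmode and 0x8000000000000000 for DFmode, with the
   complements 0x7fffffff / 0x7fffffffffffffff when INVERT is true,
   replicated across the vector when VECT is set.  */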
9630 /* Generate code for floating point ABS or NEG. */
9633 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
9636 rtx mask, set, use, clob, dst, src;
9637 bool matching_memory;
9638 bool use_sse = false;
9639 bool vector_mode = VECTOR_MODE_P (mode);
9640 enum machine_mode elt_mode = mode;
9644 elt_mode = GET_MODE_INNER (mode);
9647 else if (TARGET_SSE_MATH)
9648 use_sse = SSE_FLOAT_MODE_P (mode);
9650 /* NEG and ABS performed with SSE use bitwise mask operations.
9651 Create the appropriate mask now. */
9653 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
9660 /* If the destination is memory, and we don't have matching source
9661 operands or we're using the x87, do things in registers. */
9662 matching_memory = false;
9665 if (use_sse && rtx_equal_p (dst, src))
9666 matching_memory = true;
9668 dst = gen_reg_rtx (mode);
9670 if (MEM_P (src) && !matching_memory)
9671 src = force_reg (mode, src);
9675 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
9676 set = gen_rtx_SET (VOIDmode, dst, set);
9681 set = gen_rtx_fmt_e (code, mode, src);
9682 set = gen_rtx_SET (VOIDmode, dst, set);
9685 use = gen_rtx_USE (VOIDmode, mask);
9686 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9687 emit_insn (gen_rtx_PARALLEL (VOIDmode,
9688 gen_rtvec (3, set, use, clob)));
9694 if (dst != operands[0])
9695 emit_move_insn (operands[0], dst);
9698 /* Expand a copysign operation. Special case operand 0 being a constant. */
9701 ix86_expand_copysign (rtx operands[])
9703 enum machine_mode mode, vmode;
9704 rtx dest, op0, op1, mask, nmask;
9710 mode = GET_MODE (dest);
9711 vmode = mode == SFmode ? V4SFmode : V2DFmode;
9713 if (GET_CODE (op0) == CONST_DOUBLE)
9717 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
9718 op0 = simplify_unary_operation (ABS, mode, op0, mode);
9720 if (op0 == CONST0_RTX (mode))
9721 op0 = CONST0_RTX (vmode);
9725 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
9726 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9728 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
9729 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
9732 mask = ix86_build_signbit_mask (mode, 0, 0);
9735 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
9737 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
9741 nmask = ix86_build_signbit_mask (mode, 0, 1);
9742 mask = ix86_build_signbit_mask (mode, 0, 0);
9745 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
9747 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
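
/* The underlying identity (added note): with MASK holding only the
   sign bit, copysign is computed bitwise as

	copysign (x, y) = (x & ~mask) | (y & mask)

   The constant case needs just the second AND, since |x| is already
   available as the vector constant OP0.  */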
9751 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
9752 be a constant, and so has already been expanded into a vector constant. */
9755 ix86_split_copysign_const (rtx operands[])
9757 enum machine_mode mode, vmode;
9758 rtx dest, op0, op1, mask, x;
9765 mode = GET_MODE (dest);
9766 vmode = GET_MODE (mask);
9768 dest = simplify_gen_subreg (vmode, dest, mode, 0);
9769 x = gen_rtx_AND (vmode, dest, mask);
9770 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9772 if (op0 != CONST0_RTX (vmode))
9774 x = gen_rtx_IOR (vmode, dest, op0);
9775 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9779 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
9780 so we have to do two masks. */
9783 ix86_split_copysign_var (rtx operands[])
9785 enum machine_mode mode, vmode;
9786 rtx dest, scratch, op0, op1, mask, nmask, x;
9789 scratch = operands[1];
9792 nmask = operands[4];
9795 mode = GET_MODE (dest);
9796 vmode = GET_MODE (mask);
9798 if (rtx_equal_p (op0, op1))
9800 /* Shouldn't happen often (it's useless, obviously), but when it does
9801 we'd generate incorrect code if we continue below. */
9802 emit_move_insn (dest, op0);
9806 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
9808 gcc_assert (REGNO (op1) == REGNO (scratch));
9810 x = gen_rtx_AND (vmode, scratch, mask);
9811 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9814 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9815 x = gen_rtx_NOT (vmode, dest);
9816 x = gen_rtx_AND (vmode, x, op0);
9817 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9821 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
9823 x = gen_rtx_AND (vmode, scratch, mask);
9825 else /* alternative 2,4 */
9827 gcc_assert (REGNO (mask) == REGNO (scratch));
9828 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
9829 x = gen_rtx_AND (vmode, scratch, op1);
9831 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9833 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
9835 dest = simplify_gen_subreg (vmode, op0, mode, 0);
9836 x = gen_rtx_AND (vmode, dest, nmask);
9838 else /* alternative 3,4 */
9840 gcc_assert (REGNO (nmask) == REGNO (dest));
9842 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9843 x = gen_rtx_AND (vmode, dest, op0);
9845 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9848 x = gen_rtx_IOR (vmode, dest, scratch);
9849 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9852 /* Return TRUE or FALSE depending on whether the first SET in INSN
9853 has source and destination with matching CC modes and whether the
9854 CC mode is at least as constrained as REQ_MODE. */
9857 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
9860 enum machine_mode set_mode;
9862 set = PATTERN (insn);
9863 if (GET_CODE (set) == PARALLEL)
9864 set = XVECEXP (set, 0, 0);
9865 gcc_assert (GET_CODE (set) == SET);
9866 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
9868 set_mode = GET_MODE (SET_DEST (set));
9872 if (req_mode != CCNOmode
9873 && (req_mode != CCmode
9874 || XEXP (SET_SRC (set), 1) != const0_rtx))
9878 if (req_mode == CCGCmode)
9882 if (req_mode == CCGOCmode || req_mode == CCNOmode)
9886 if (req_mode == CCZmode)
9896 return (GET_MODE (SET_SRC (set)) == set_mode);
9899 /* Generate insn patterns to do an integer compare of OPERANDS. */
9902 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
9904 enum machine_mode cmpmode;
9907 cmpmode = SELECT_CC_MODE (code, op0, op1);
9908 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
9910 /* This is very simple, but making the interface the same as in the
9911 FP case makes the rest of the code easier. */
9912 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
9913 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
9915 /* Return the test that should be put into the flags user, i.e.
9916 the bcc, scc, or cmov instruction. */
9917 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
9920 /* Figure out whether to use ordered or unordered fp comparisons.
9921 Return the appropriate mode to use. */
9924 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
9926 /* ??? In order to make all comparisons reversible, we do all comparisons
9927 non-trapping when compiling for IEEE. Once gcc is able to distinguish
9928 all forms of trapping and nontrapping comparisons, we can make inequality
9929 comparisons trapping again, since it results in better code when using
9930 FCOM based compares. */
9931 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
9935 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
9937 if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9938 return ix86_fp_compare_mode (code);
9941 /* Only zero flag is needed. */
9943 case NE: /* ZF!=0 */
9945 /* Codes needing carry flag. */
9946 case GEU: /* CF=0 */
9947 case GTU: /* CF=0 & ZF=0 */
9948 case LTU: /* CF=1 */
9949 case LEU: /* CF=1 | ZF=1 */
9951 /* Codes possibly doable only with sign flag when
9952 comparing against zero. */
9953 case GE: /* SF=OF or SF=0 */
9954 case LT: /* SF<>OF or SF=1 */
9955 if (op1 == const0_rtx)
9958 /* For other cases the carry flag is not required. */
9960 /* Codes doable only with the sign flag when comparing
9961 against zero, but we lack a jump instruction for it,
9962 so we need to use relational tests against overflow,
9963 which thus needs to be zero. */
9964 case GT: /* ZF=0 & SF=OF */
9965 case LE: /* ZF=1 | SF<>OF */
9966 if (op1 == const0_rtx)
9970 /* strcmp patterns do (use flags) and combine may ask us for a proper mode. */
9979 /* Return the fixed registers used for condition codes. */
9982 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9989 /* If two condition code modes are compatible, return a condition code
9990 mode which is compatible with both. Otherwise, return VOIDmode. */
9993 static enum machine_mode
9994 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
9999 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
10002 if ((m1 == CCGCmode && m2 == CCGOCmode)
10003 || (m1 == CCGOCmode && m2 == CCGCmode))
10009 gcc_unreachable ();
10031 /* These are only compatible with themselves, which we already checked above. */
10037 /* Return true if we should use an FCOMI instruction for this fp comparison. */
10040 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
10042 enum rtx_code swapped_code = swap_condition (code);
10043 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
10044 || (ix86_fp_comparison_cost (swapped_code)
10045 == ix86_fp_comparison_fcomi_cost (swapped_code)));
10048 /* Swap, force into registers, or otherwise massage the two operands
10049 to a fp comparison. The operands are updated in place; the new
10050 comparison code is returned. */
10052 static enum rtx_code
10053 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
10055 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
10056 rtx op0 = *pop0, op1 = *pop1;
10057 enum machine_mode op_mode = GET_MODE (op0);
10058 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
10060 /* All of the unordered compare instructions only work on registers.
10061 The same is true of the fcomi compare instructions. The XFmode
10062 compare instructions require registers except when comparing
10063 against zero or when converting operand 1 from fixed point to floating point. */
10067 && (fpcmp_mode == CCFPUmode
10068 || (op_mode == XFmode
10069 && ! (standard_80387_constant_p (op0) == 1
10070 || standard_80387_constant_p (op1) == 1)
10071 && GET_CODE (op1) != FLOAT)
10072 || ix86_use_fcomi_compare (code)))
10074 op0 = force_reg (op_mode, op0);
10075 op1 = force_reg (op_mode, op1);
10079 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
10080 things around if they appear profitable, otherwise force op0
10081 into a register. */
10083 if (standard_80387_constant_p (op0) == 0
10084 || (GET_CODE (op0) == MEM
10085 && ! (standard_80387_constant_p (op1) == 0
10086 || GET_CODE (op1) == MEM)))
10089 tmp = op0, op0 = op1, op1 = tmp;
10090 code = swap_condition (code);
10093 if (GET_CODE (op0) != REG)
10094 op0 = force_reg (op_mode, op0);
10096 if (CONSTANT_P (op1))
10098 int tmp = standard_80387_constant_p (op1);
10100 op1 = validize_mem (force_const_mem (op_mode, op1));
10104 op1 = force_reg (op_mode, op1);
10107 op1 = force_reg (op_mode, op1);
10111 /* Try to rearrange the comparison to make it cheaper. */
10112 if (ix86_fp_comparison_cost (code)
10113 > ix86_fp_comparison_cost (swap_condition (code))
10114 && (GET_CODE (op1) == REG || !no_new_pseudos))
10117 tmp = op0, op0 = op1, op1 = tmp;
10118 code = swap_condition (code);
10119 if (GET_CODE (op0) != REG)
10120 op0 = force_reg (op_mode, op0);
10128 /* Convert comparison codes we use to represent FP comparison to integer
10129 code that will result in a proper branch. Return UNKNOWN if no such code is available. */
10133 ix86_fp_compare_code_to_integer (enum rtx_code code)
10162 /* Split comparison code CODE into comparisons we can do using branch
10163 instructions. BYPASS_CODE is the comparison code for a branch that will
10164 branch around FIRST_CODE and SECOND_CODE. If one of the branches
10165 is not required, its value is set to UNKNOWN.
10166 We never require more than two branches. */
10169 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
10170 enum rtx_code *first_code,
10171 enum rtx_code *second_code)
10173 *first_code = code;
10174 *bypass_code = UNKNOWN;
10175 *second_code = UNKNOWN;
10177 /* The fcomi comparison sets flags as follows:
10187 case GT: /* GTU - CF=0 & ZF=0 */
10188 case GE: /* GEU - CF=0 */
10189 case ORDERED: /* PF=0 */
10190 case UNORDERED: /* PF=1 */
10191 case UNEQ: /* EQ - ZF=1 */
10192 case UNLT: /* LTU - CF=1 */
10193 case UNLE: /* LEU - CF=1 | ZF=1 */
10194 case LTGT: /* EQ - ZF=0 */
10196 case LT: /* LTU - CF=1 - fails on unordered */
10197 *first_code = UNLT;
10198 *bypass_code = UNORDERED;
10200 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
10201 *first_code = UNLE;
10202 *bypass_code = UNORDERED;
10204 case EQ: /* EQ - ZF=1 - fails on unordered */
10205 *first_code = UNEQ;
10206 *bypass_code = UNORDERED;
10208 case NE: /* NE - ZF=0 - fails on unordered */
10209 *first_code = LTGT;
10210 *second_code = UNORDERED;
10212 case UNGE: /* GEU - CF=0 - fails on unordered */
10214 *second_code = UNORDERED;
10216 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
10218 *second_code = UNORDERED;
10221 gcc_unreachable ();
10223 if (!TARGET_IEEE_FP)
10225 *second_code = UNKNOWN;
10226 *bypass_code = UNKNOWN;
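
/* Worked example (added): with TARGET_IEEE_FP, a "a < b" test after an
   fcomi therefore becomes roughly

	jp	.Lskip		# bypass: unordered sets PF
	jb	.Ltrue		# UNLT: CF set
   .Lskip:

   i.e. *bypass_code = UNORDERED and *first_code = UNLT, exactly as the
   LT case above arranges (labels here are hypothetical).  */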
10230 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
10231 All following functions use the number of instructions as a cost metric.
10232 In the future this should be tweaked to compute bytes for optimize_size and
10233 take into account the performance of various instructions on various CPUs. */
10235 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
10237 if (!TARGET_IEEE_FP)
10239 /* The cost of code output by ix86_expand_fp_compare. */
10263 gcc_unreachable ();
10267 /* Return cost of comparison done using fcomi operation.
10268 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10270 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
10272 enum rtx_code bypass_code, first_code, second_code;
10273 /* Return an arbitrarily high cost when the instruction is not supported -
10274 this prevents gcc from using it. */
10277 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10278 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
10281 /* Return cost of comparison done using sahf operation.
10282 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10284 ix86_fp_comparison_sahf_cost (enum rtx_code code)
10286 enum rtx_code bypass_code, first_code, second_code;
10287 /* Return an arbitrarily high cost when the instruction is not preferred -
10288 this keeps gcc from using it. */
10289 if (!TARGET_USE_SAHF && !optimize_size)
10291 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10292 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
10295 /* Compute cost of the comparison done using any method.
10296 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10298 ix86_fp_comparison_cost (enum rtx_code code)
10300 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
10303 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
10304 sahf_cost = ix86_fp_comparison_sahf_cost (code);
10306 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
10307 if (min > sahf_cost)
10309 if (min > fcomi_cost)
10314 /* Generate insn patterns to do a floating point compare of OPERANDS. */
10317 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
10318 rtx *second_test, rtx *bypass_test)
10320 enum machine_mode fpcmp_mode, intcmp_mode;
10322 int cost = ix86_fp_comparison_cost (code);
10323 enum rtx_code bypass_code, first_code, second_code;
10325 fpcmp_mode = ix86_fp_compare_mode (code);
10326 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
10329 *second_test = NULL_RTX;
10331 *bypass_test = NULL_RTX;
10333 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10335 /* Do fcomi/sahf based test when profitable. */
10336 if ((bypass_code == UNKNOWN || bypass_test)
10337 && (second_code == UNKNOWN || second_test)
10338 && ix86_fp_comparison_arithmetics_cost (code) > cost)
10342 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10343 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
10349 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10350 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10352 scratch = gen_reg_rtx (HImode);
10353 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10354 emit_insn (gen_x86_sahf_1 (scratch));
10357 /* The FP codes work out to act like unsigned. */
10358 intcmp_mode = fpcmp_mode;
10360 if (bypass_code != UNKNOWN)
10361 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
10362 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10364 if (second_code != UNKNOWN)
10365 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
10366 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10371 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
10372 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10373 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10375 scratch = gen_reg_rtx (HImode);
10376 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10378 /* In the unordered case, we have to check C2 for NaN's, which
10379 doesn't happen to work out to anything nice combination-wise.
10380 So do some bit twiddling on the value we've got in AH to come
10381 up with an appropriate set of condition codes. */
10383 intcmp_mode = CCNOmode;
10388 if (code == GT || !TARGET_IEEE_FP)
10390 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10395 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10396 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10397 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
10398 intcmp_mode = CCmode;
10404 if (code == LT && TARGET_IEEE_FP)
10406 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10407 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
10408 intcmp_mode = CCmode;
10413 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
10419 if (code == GE || !TARGET_IEEE_FP)
10421 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
10426 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10427 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10434 if (code == LE && TARGET_IEEE_FP)
10436 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10437 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10438 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10439 intcmp_mode = CCmode;
10444 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10450 if (code == EQ && TARGET_IEEE_FP)
10452 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10453 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10454 intcmp_mode = CCmode;
10459 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10466 if (code == NE && TARGET_IEEE_FP)
10468 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10469 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10475 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10481 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10485 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10490 gcc_unreachable ();
10494 /* Return the test that should be put into the flags user, i.e.
10495 the bcc, scc, or cmov instruction. */
10496 return gen_rtx_fmt_ee (code, VOIDmode,
10497 gen_rtx_REG (intcmp_mode, FLAGS_REG),
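
/* Key to the magic constants above (added note): after "fnstsw %ax"
   the interesting i387 condition bits land in %ah as C0 = 0x01,
   C2 = 0x04 and C3 = 0x40, so 0x45 masks C3|C2|C0 and 0x05 masks
   C2|C0.  The sahf path instead copies %ah into EFLAGS, mapping C0 to
   CF, C2 to PF and C3 to ZF.  */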
10502 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
10505 op0 = ix86_compare_op0;
10506 op1 = ix86_compare_op1;
10509 *second_test = NULL_RTX;
10511 *bypass_test = NULL_RTX;
10513 if (ix86_compare_emitted)
10515 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
10516 ix86_compare_emitted = NULL_RTX;
10518 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
10519 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10520 second_test, bypass_test);
10522 ret = ix86_expand_int_compare (code, op0, op1);
10527 /* Return true if CODE will result in a nontrivial jump sequence. */
10529 ix86_fp_jump_nontrivial_p (enum rtx_code code)
10531 enum rtx_code bypass_code, first_code, second_code;
10534 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10535 return bypass_code != UNKNOWN || second_code != UNKNOWN;
10539 ix86_expand_branch (enum rtx_code code, rtx label)
10543 /* If we have emitted a compare insn, go straight to simple.
10544 ix86_expand_compare won't emit anything if ix86_compare_emitted is non-NULL. */
10546 if (ix86_compare_emitted)
10549 switch (GET_MODE (ix86_compare_op0))
10555 tmp = ix86_expand_compare (code, NULL, NULL);
10556 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10557 gen_rtx_LABEL_REF (VOIDmode, label),
10559 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
10568 enum rtx_code bypass_code, first_code, second_code;
10570 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
10571 &ix86_compare_op1);
10573 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10575 /* Check whether we will use the natural sequence with one jump. If
10576 so, we can expand the jump early. Otherwise delay expansion by
10577 creating a compound insn so as not to confuse the optimizers. */
10578 if (bypass_code == UNKNOWN && second_code == UNKNOWN
10581 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
10582 gen_rtx_LABEL_REF (VOIDmode, label),
10583 pc_rtx, NULL_RTX, NULL_RTX);
10587 tmp = gen_rtx_fmt_ee (code, VOIDmode,
10588 ix86_compare_op0, ix86_compare_op1);
10589 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10590 gen_rtx_LABEL_REF (VOIDmode, label),
10592 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
10594 use_fcomi = ix86_use_fcomi_compare (code);
10595 vec = rtvec_alloc (3 + !use_fcomi);
10596 RTVEC_ELT (vec, 0) = tmp;
10598 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FPSR_REG));
10600 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FLAGS_REG));
10603 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
10605 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
10614 /* Expand a double-word (DImode, or TImode on 64-bit) branch into multiple compare+branch. */
10616 rtx lo[2], hi[2], label2;
10617 enum rtx_code code1, code2, code3;
10618 enum machine_mode submode;
10620 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
10622 tmp = ix86_compare_op0;
10623 ix86_compare_op0 = ix86_compare_op1;
10624 ix86_compare_op1 = tmp;
10625 code = swap_condition (code);
10627 if (GET_MODE (ix86_compare_op0) == DImode)
10629 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
10630 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
10635 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
10636 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
10640 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
10641 avoid two branches. This costs one extra insn, so disable when
10642 optimizing for size. */
10644 if ((code == EQ || code == NE)
10646 || hi[1] == const0_rtx || lo[1] == const0_rtx))
10651 if (hi[1] != const0_rtx)
10652 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
10653 NULL_RTX, 0, OPTAB_WIDEN);
10656 if (lo[1] != const0_rtx)
10657 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
10658 NULL_RTX, 0, OPTAB_WIDEN);
10660 tmp = expand_binop (submode, ior_optab, xor1, xor0,
10661 NULL_RTX, 0, OPTAB_WIDEN);
10663 ix86_compare_op0 = tmp;
10664 ix86_compare_op1 = const0_rtx;
10665 ix86_expand_branch (code, label);
10669 /* Otherwise, if we are doing a less-than or greater-than-or-equal
10670 comparison, op1 is a constant and the low word is zero, then we can
10671 just examine the high word. */
10673 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
10676 case LT: case LTU: case GE: case GEU:
10677 ix86_compare_op0 = hi[0];
10678 ix86_compare_op1 = hi[1];
10679 ix86_expand_branch (code, label);
10685 /* Otherwise, we need two or three jumps. */
10687 label2 = gen_label_rtx ();
10690 code2 = swap_condition (code);
10691 code3 = unsigned_condition (code);
10695 case LT: case GT: case LTU: case GTU:
10698 case LE: code1 = LT; code2 = GT; break;
10699 case GE: code1 = GT; code2 = LT; break;
10700 case LEU: code1 = LTU; code2 = GTU; break;
10701 case GEU: code1 = GTU; code2 = LTU; break;
10703 case EQ: code1 = UNKNOWN; code2 = NE; break;
10704 case NE: code2 = UNKNOWN; break;
10707 gcc_unreachable ();
10712 * if (hi(a) < hi(b)) goto true;
10713 * if (hi(a) > hi(b)) goto false;
10714 * if (lo(a) < lo(b)) goto true;
10718 ix86_compare_op0 = hi[0];
10719 ix86_compare_op1 = hi[1];
10721 if (code1 != UNKNOWN)
10722 ix86_expand_branch (code1, label);
10723 if (code2 != UNKNOWN)
10724 ix86_expand_branch (code2, label2);
10726 ix86_compare_op0 = lo[0];
10727 ix86_compare_op1 = lo[1];
10728 ix86_expand_branch (code3, label);
10730 if (code2 != UNKNOWN)
10731 emit_label (label2);
10736 gcc_unreachable ();
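
/* Assembly sketch for the two/three-jump case (added; register
   assignment is hypothetical, a in %edx:%eax and b in %ecx:%ebx):

	cmpl	%ecx, %edx	# compare high words, signed
	jl	.Ltrue
	jg	.Lfalse
	cmpl	%ebx, %eax	# high words equal: compare low words
	jb	.Ltrue		# unsigned test on the low half
   .Lfalse:

   matching code1/code2/code3 = LT/GT/LTU for a signed "a < b".  */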
10740 /* Split branch based on floating point condition. */
10742 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
10743 rtx target1, rtx target2, rtx tmp, rtx pushed)
10745 rtx second, bypass;
10746 rtx label = NULL_RTX;
10748 int bypass_probability = -1, second_probability = -1, probability = -1;
10751 if (target2 != pc_rtx)
10754 code = reverse_condition_maybe_unordered (code);
10759 condition = ix86_expand_fp_compare (code, op1, op2,
10760 tmp, &second, &bypass);
10762 /* Remove pushed operand from stack. */
10764 ix86_free_from_memory (GET_MODE (pushed));
10766 if (split_branch_probability >= 0)
10768 /* Distribute the probabilities across the jumps.
10769 Assume that the BYPASS and SECOND branches always test for UNORDERED. */
10771 probability = split_branch_probability;
10773 /* A value of 1 is low enough that the probability does not need
10774 to be updated. Later we may run some experiments and see
10775 if unordered values are more frequent in practice. */
10777 bypass_probability = 1;
10779 second_probability = 1;
10781 if (bypass != NULL_RTX)
10783 label = gen_label_rtx ();
10784 i = emit_jump_insn (gen_rtx_SET
10786 gen_rtx_IF_THEN_ELSE (VOIDmode,
10788 gen_rtx_LABEL_REF (VOIDmode,
10791 if (bypass_probability >= 0)
10793 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10794 GEN_INT (bypass_probability),
10797 i = emit_jump_insn (gen_rtx_SET
10799 gen_rtx_IF_THEN_ELSE (VOIDmode,
10800 condition, target1, target2)));
10801 if (probability >= 0)
10803 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10804 GEN_INT (probability),
10806 if (second != NULL_RTX)
10808 i = emit_jump_insn (gen_rtx_SET
10810 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
10812 if (second_probability >= 0)
10814 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10815 GEN_INT (second_probability),
10818 if (label != NULL_RTX)
10819 emit_label (label);
10823 ix86_expand_setcc (enum rtx_code code, rtx dest)
10825 rtx ret, tmp, tmpreg, equiv;
10826 rtx second_test, bypass_test;
10828 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
10829 return 0; /* FAIL */
10831 gcc_assert (GET_MODE (dest) == QImode);
10833 ret = ix86_expand_compare (code, &second_test, &bypass_test);
10834 PUT_MODE (ret, QImode);
10839 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
10840 if (bypass_test || second_test)
10842 rtx test = second_test;
10844 rtx tmp2 = gen_reg_rtx (QImode);
10847 gcc_assert (!second_test);
10848 test = bypass_test;
10850 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
10852 PUT_MODE (test, QImode);
10853 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
10856 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
10858 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
10861 /* Attach a REG_EQUAL note describing the comparison result. */
10862 if (ix86_compare_op0 && ix86_compare_op1)
10864 equiv = simplify_gen_relational (code, QImode,
10865 GET_MODE (ix86_compare_op0),
10866 ix86_compare_op0, ix86_compare_op1);
10867 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
10870 return 1; /* DONE */
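/* Annotation (illustrative, e.g. after a ucomiss with IEEE math): an FP
   equality ends up as two setcc's merged above:

	sete	%al		; ZF: equal or unordered
	setnp	%dl		; !PF: ordered
	andb	%dl, %al	; equal and ordered

   which is the AND path taken when the bypass test is reversed.  */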
10873 /* Expand a comparison setting or clearing the carry flag. Return true
10874 when successful, and set *POP to the comparison for the operation. */
10876 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
10878 enum machine_mode mode =
10879 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
10881 /* Do not handle DImode compares that go through the special path.
10882 Also, we can't deal with FP compares yet; this is possible to add. */
10883 if (mode == (TARGET_64BIT ? TImode : DImode))
10885 if (FLOAT_MODE_P (mode))
10887 rtx second_test = NULL, bypass_test = NULL;
10888 rtx compare_op, compare_seq;
10890 /* Shortcut: the following common codes never translate into carry-flag compares. */
10891 if (code == EQ || code == NE || code == UNEQ || code == LTGT
10892 || code == ORDERED || code == UNORDERED)
10895 /* These comparisons require the zero flag; swap the operands so that they no longer do. */
10896 if ((code == GT || code == UNLE || code == LE || code == UNGT)
10897 && !TARGET_IEEE_FP)
10902 code = swap_condition (code);
10905 /* Try to expand the comparison and verify that we end up with a carry-flag
10906 based comparison. This fails to be true only when we decide to expand the
10907 comparison using arithmetic, which is not a common scenario. */
10909 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10910 &second_test, &bypass_test);
10911 compare_seq = get_insns ();
10914 if (second_test || bypass_test)
10916 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10917 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10918 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
10920 code = GET_CODE (compare_op);
10921 if (code != LTU && code != GEU)
10923 emit_insn (compare_seq);
10927 if (!INTEGRAL_MODE_P (mode))
10935 /* Convert a==0 into (unsigned)a<1. */
10938 if (op1 != const0_rtx)
10941 code = (code == EQ ? LTU : GEU);
10944 /* Convert a>b into b<a or a>=b+1. */
10947 if (GET_CODE (op1) == CONST_INT)
10949 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
10950 /* Bail out on overflow. We could still swap the operands, but that
10951 would force loading of the constant into a register. */
10952 if (op1 == const0_rtx
10953 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
10955 code = (code == GTU ? GEU : LTU);
10962 code = (code == GTU ? LTU : GEU);
10966 /* Convert a>=0 into (unsigned)a<0x80000000. */
10969 if (mode == DImode || op1 != const0_rtx)
10971 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10972 code = (code == LT ? GEU : LTU);
10976 if (mode == DImode || op1 != constm1_rtx)
10978 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10979 code = (code == LE ? GEU : LTU);
10985 /* Swapping the operands may cause a constant to appear as the first operand. */
10986 if (!nonimmediate_operand (op0, VOIDmode))
10988 if (no_new_pseudos)
10990 op0 = force_reg (mode, op0);
10992 ix86_compare_op0 = op0;
10993 ix86_compare_op1 = op1;
10994 *pop = ix86_expand_compare (code, NULL, NULL);
10995 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
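/* Worked example (illustrative): for SImode "a == 0" the conversion above
   yields "(unsigned) a < 1", i.e.

	cmpl	$1, a		; CF = (a < 1) = (a == 0)

   so a following sbb/adc can consume the flag with no setcc.  */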
11000 ix86_expand_int_movcc (rtx operands[])
11002 enum rtx_code code = GET_CODE (operands[1]), compare_code;
11003 rtx compare_seq, compare_op;
11004 rtx second_test, bypass_test;
11005 enum machine_mode mode = GET_MODE (operands[0]);
11006 bool sign_bit_compare_p = false;
11009 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11010 compare_seq = get_insns ();
11013 compare_code = GET_CODE (compare_op);
11015 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
11016 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
11017 sign_bit_compare_p = true;
11019 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
11020 HImode insns, we'd be swallowed in word prefix ops. */
11022 if ((mode != HImode || TARGET_FAST_PREFIX)
11023 && (mode != (TARGET_64BIT ? TImode : DImode))
11024 && GET_CODE (operands[2]) == CONST_INT
11025 && GET_CODE (operands[3]) == CONST_INT)
11027 rtx out = operands[0];
11028 HOST_WIDE_INT ct = INTVAL (operands[2]);
11029 HOST_WIDE_INT cf = INTVAL (operands[3]);
11030 HOST_WIDE_INT diff;
11033 /* Sign-bit compares are better done using shifts than using sbb. */
11035 if (sign_bit_compare_p
11036 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11037 ix86_compare_op1, &compare_op))
11039 /* Detect overlap between destination and compare sources. */
11042 if (!sign_bit_compare_p)
11044 bool fpcmp = false;
11046 compare_code = GET_CODE (compare_op);
11048 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11049 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11052 compare_code = ix86_fp_compare_code_to_integer (compare_code);
11055 /* To simplify the rest of the code, restrict to the GEU case. */
11056 if (compare_code == LTU)
11058 HOST_WIDE_INT tmp = ct;
11061 compare_code = reverse_condition (compare_code);
11062 code = reverse_condition (code);
11067 PUT_CODE (compare_op,
11068 reverse_condition_maybe_unordered
11069 (GET_CODE (compare_op)));
11071 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11075 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
11076 || reg_overlap_mentioned_p (out, ix86_compare_op1))
11077 tmp = gen_reg_rtx (mode);
11079 if (mode == DImode)
11080 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
11082 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
11086 if (code == GT || code == GE)
11087 code = reverse_condition (code);
11090 HOST_WIDE_INT tmp = ct;
11095 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
11096 ix86_compare_op1, VOIDmode, 0, -1);
11109 tmp = expand_simple_binop (mode, PLUS,
11111 copy_rtx (tmp), 1, OPTAB_DIRECT);
11122 tmp = expand_simple_binop (mode, IOR,
11124 copy_rtx (tmp), 1, OPTAB_DIRECT);
11126 else if (diff == -1 && ct)
11136 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11138 tmp = expand_simple_binop (mode, PLUS,
11139 copy_rtx (tmp), GEN_INT (cf),
11140 copy_rtx (tmp), 1, OPTAB_DIRECT);
11148 * andl cf - ct, dest
11158 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11161 tmp = expand_simple_binop (mode, AND,
11163 gen_int_mode (cf - ct, mode),
11164 copy_rtx (tmp), 1, OPTAB_DIRECT);
11166 tmp = expand_simple_binop (mode, PLUS,
11167 copy_rtx (tmp), GEN_INT (ct),
11168 copy_rtx (tmp), 1, OPTAB_DIRECT);
11171 if (!rtx_equal_p (tmp, out))
11172 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
11174 return 1; /* DONE */
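/* Sketch of the shape produced above (an annotation, assuming the
   compare was normalized to GEU with ct/cf swapped as needed):

	cmpl	...		; set CF
	sbbl	dest, dest	; dest = CF ? -1 : 0
	andl	$(cf - ct), dest
	addl	$ct, dest	; dest = CF ? cf : ct  */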
11180 tmp = ct, ct = cf, cf = tmp;
11182 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11184 /* We may be reversing an unordered compare to a normal compare; that
11185 is not valid in general (we may convert a non-trapping condition
11186 into a trapping one), but on i386 we currently emit all
11187 comparisons unordered. */
11188 compare_code = reverse_condition_maybe_unordered (compare_code);
11189 code = reverse_condition_maybe_unordered (code);
11193 compare_code = reverse_condition (compare_code);
11194 code = reverse_condition (code);
11198 compare_code = UNKNOWN;
11199 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
11200 && GET_CODE (ix86_compare_op1) == CONST_INT)
11202 if (ix86_compare_op1 == const0_rtx
11203 && (code == LT || code == GE))
11204 compare_code = code;
11205 else if (ix86_compare_op1 == constm1_rtx)
11209 else if (code == GT)
11214 /* Optimize dest = (op0 < 0) ? -1 : cf. */
11215 if (compare_code != UNKNOWN
11216 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
11217 && (cf == -1 || ct == -1))
11219 /* If the lea code below could be used, only optimize
11220 if it results in a 2-insn sequence. */
11222 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
11223 || diff == 3 || diff == 5 || diff == 9)
11224 || (compare_code == LT && ct == -1)
11225 || (compare_code == GE && cf == -1))
11228 * notl op1 (if necessary)
11236 code = reverse_condition (code);
11239 out = emit_store_flag (out, code, ix86_compare_op0,
11240 ix86_compare_op1, VOIDmode, 0, -1);
11242 out = expand_simple_binop (mode, IOR,
11244 out, 1, OPTAB_DIRECT);
11245 if (out != operands[0])
11246 emit_move_insn (operands[0], out);
11248 return 1; /* DONE */
11253 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
11254 || diff == 3 || diff == 5 || diff == 9)
11255 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
11257 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
11263 * lea cf(dest*(ct-cf)),dest
11267 * This also catches the degenerate setcc-only case.
11273 out = emit_store_flag (out, code, ix86_compare_op0,
11274 ix86_compare_op1, VOIDmode, 0, 1);
11277 /* On x86_64 the lea instruction operates on Pmode, so we need
11278 the arithmetic done in the proper mode to match. */
11280 tmp = copy_rtx (out);
11284 out1 = copy_rtx (out);
11285 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
11289 tmp = gen_rtx_PLUS (mode, tmp, out1);
11295 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
11298 if (!rtx_equal_p (tmp, out))
11301 out = force_operand (tmp, copy_rtx (out));
11303 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
11305 if (!rtx_equal_p (out, operands[0]))
11306 emit_move_insn (operands[0], copy_rtx (out));
11308 return 1; /* DONE */
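/* Worked example (illustrative): ct = 7, cf = 3, so diff = 4:

	setcc	dest			; dest = 0 or 1
	lea	3(,dest,4), dest	; dest = 3 or 7

   one setcc plus one lea, no branch.  */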
11312 * General case:                 Jumpful:
11313 *   xorl dest,dest                cmpl op1, op2
11314 *   cmpl op1, op2                 movl ct, dest
11315 *   setcc dest                    jcc 1f
11316 *   decl dest                     movl cf, dest
11317 *   andl (cf-ct),dest           1:
11320 * Size 20.                      Size 14.
11322 * This is reasonably steep, but branch mispredict costs are
11323 * high on modern cpus, so consider failing only if optimizing for size.
11327 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11328 && BRANCH_COST >= 2)
11334 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11335 /* We may be reversing an unordered compare to a normal compare;
11336 that is not valid in general (we may convert a non-trapping
11337 condition into a trapping one), but on i386 we currently
11338 emit all comparisons unordered. */
11339 code = reverse_condition_maybe_unordered (code);
11342 code = reverse_condition (code);
11343 if (compare_code != UNKNOWN)
11344 compare_code = reverse_condition (compare_code);
11348 if (compare_code != UNKNOWN)
11350 /* notl op1 (if needed)
11355 For x < 0 (resp. x <= -1) there will be no notl,
11356 so if possible swap the constants to get rid of the complement.
11358 True/false will be -1/0 while the code below (store flag
11359 followed by decrement) is 0/-1, so the constants need
11360 to be exchanged once more. */
11362 if (compare_code == GE || !cf)
11364 code = reverse_condition (code);
11369 HOST_WIDE_INT tmp = cf;
11374 out = emit_store_flag (out, code, ix86_compare_op0,
11375 ix86_compare_op1, VOIDmode, 0, -1);
11379 out = emit_store_flag (out, code, ix86_compare_op0,
11380 ix86_compare_op1, VOIDmode, 0, 1);
11382 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
11383 copy_rtx (out), 1, OPTAB_DIRECT);
11386 out = expand_simple_binop (mode, AND, copy_rtx (out),
11387 gen_int_mode (cf - ct, mode),
11388 copy_rtx (out), 1, OPTAB_DIRECT);
11390 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
11391 copy_rtx (out), 1, OPTAB_DIRECT);
11392 if (!rtx_equal_p (out, operands[0]))
11393 emit_move_insn (operands[0], copy_rtx (out));
11395 return 1; /* DONE */
11399 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11401 /* Try a few things more with specific constants and a variable. */
11404 rtx var, orig_out, out, tmp;
11406 if (BRANCH_COST <= 2)
11407 return 0; /* FAIL */
11409 /* If one of the two operands is an interesting constant, load a
11410 constant with the above and mask it in with a logical operation. */
11412 if (GET_CODE (operands[2]) == CONST_INT)
11415 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
11416 operands[3] = constm1_rtx, op = and_optab;
11417 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
11418 operands[3] = const0_rtx, op = ior_optab;
11420 return 0; /* FAIL */
11422 else if (GET_CODE (operands[3]) == CONST_INT)
11425 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
11426 operands[2] = constm1_rtx, op = and_optab;
11427 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
11428 operands[2] = const0_rtx, op = ior_optab;
11430 return 0; /* FAIL */
11433 return 0; /* FAIL */
11435 orig_out = operands[0];
11436 tmp = gen_reg_rtx (mode);
11439 /* Recurse to get the constant loaded. */
11440 if (ix86_expand_int_movcc (operands) == 0)
11441 return 0; /* FAIL */
11443 /* Mask in the interesting variable. */
11444 out = expand_binop (mode, op, var, tmp, orig_out, 0,
11446 if (!rtx_equal_p (out, orig_out))
11447 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
11449 return 1; /* DONE */
11453 * For comparison with above,
11463 if (! nonimmediate_operand (operands[2], mode))
11464 operands[2] = force_reg (mode, operands[2]);
11465 if (! nonimmediate_operand (operands[3], mode))
11466 operands[3] = force_reg (mode, operands[3]);
11468 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11470 rtx tmp = gen_reg_rtx (mode);
11471 emit_move_insn (tmp, operands[3]);
11474 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11476 rtx tmp = gen_reg_rtx (mode);
11477 emit_move_insn (tmp, operands[2]);
11481 if (! register_operand (operands[2], VOIDmode)
11483 || ! register_operand (operands[3], VOIDmode)))
11484 operands[2] = force_reg (mode, operands[2]);
11487 && ! register_operand (operands[3], VOIDmode))
11488 operands[3] = force_reg (mode, operands[3]);
11490 emit_insn (compare_seq);
11491 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11492 gen_rtx_IF_THEN_ELSE (mode,
11493 compare_op, operands[2],
11496 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11497 gen_rtx_IF_THEN_ELSE (mode,
11499 copy_rtx (operands[3]),
11500 copy_rtx (operands[0]))));
11502 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11503 gen_rtx_IF_THEN_ELSE (mode,
11505 copy_rtx (operands[2]),
11506 copy_rtx (operands[0]))));
11508 return 1; /* DONE */
11511 /* Swap, force into registers, or otherwise massage the two operands
11512 to an sse comparison with a mask result. Thus we differ a bit from
11513 ix86_prepare_fp_compare_args which expects to produce a flags result.
11515 The DEST operand exists to help determine whether to commute commutative
11516 operators. The POP0/POP1 operands are updated in place. The new
11517 comparison code is returned, or UNKNOWN if not implementable. */
11519 static enum rtx_code
11520 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
11521 rtx *pop0, rtx *pop1)
11529 /* We have no LTGT as an operator. We could implement it with
11530 NE & ORDERED, but this requires an extra temporary. It's
11531 not clear that it's worth it. */
11538 /* These are supported directly. */
11545 /* For commutative operators, try to canonicalize the destination
11546 operand to be first in the comparison - this helps reload to
11547 avoid extra moves. */
11548 if (!dest || !rtx_equal_p (dest, *pop1))
11556 /* These are not supported directly. Swap the comparison operands
11557 to transform into something that is supported. */
11561 code = swap_condition (code);
11565 gcc_unreachable ();
11571 /* Detect conditional moves that exactly match min/max operational
11572 semantics. Note that this is IEEE safe, as long as we don't
11573 interchange the operands.
11575 Returns FALSE if this conditional move doesn't match a MIN/MAX,
11576 and TRUE if the operation is successful and instructions are emitted. */
11579 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
11580 rtx cmp_op1, rtx if_true, rtx if_false)
11582 enum machine_mode mode;
11588 else if (code == UNGE)
11591 if_true = if_false;
11597 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
11599 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
11604 mode = GET_MODE (dest);
11606 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
11607 but MODE may be a vector mode and thus not appropriate. */
11608 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
11610 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
11613 if_true = force_reg (mode, if_true);
11614 v = gen_rtvec (2, if_true, if_false);
11615 tmp = gen_rtx_UNSPEC (mode, v, u);
11619 code = is_min ? SMIN : SMAX;
11620 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
11623 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
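/* Annotation: SSE min/max are not commutative when NaNs are involved --
   minss/maxss return the second operand if the operands compare
   unordered -- which is why the operand order fixed above must not be
   interchanged for IEEE correctness.  */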
11627 /* Expand an sse vector comparison. Return the register with the result. */
11630 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
11631 rtx op_true, rtx op_false)
11633 enum machine_mode mode = GET_MODE (dest);
11636 cmp_op0 = force_reg (mode, cmp_op0);
11637 if (!nonimmediate_operand (cmp_op1, mode))
11638 cmp_op1 = force_reg (mode, cmp_op1);
11641 || reg_overlap_mentioned_p (dest, op_true)
11642 || reg_overlap_mentioned_p (dest, op_false))
11643 dest = gen_reg_rtx (mode);
11645 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
11646 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11651 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
11652 operations. This is used for both scalar and vector conditional moves. */
11655 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
11657 enum machine_mode mode = GET_MODE (dest);
11660 if (op_false == CONST0_RTX (mode))
11662 op_true = force_reg (mode, op_true);
11663 x = gen_rtx_AND (mode, cmp, op_true);
11664 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11666 else if (op_true == CONST0_RTX (mode))
11668 op_false = force_reg (mode, op_false);
11669 x = gen_rtx_NOT (mode, cmp);
11670 x = gen_rtx_AND (mode, x, op_false);
11671 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11675 op_true = force_reg (mode, op_true);
11676 op_false = force_reg (mode, op_false);
11678 t2 = gen_reg_rtx (mode);
11680 t3 = gen_reg_rtx (mode);
11684 x = gen_rtx_AND (mode, op_true, cmp);
11685 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
11687 x = gen_rtx_NOT (mode, cmp);
11688 x = gen_rtx_AND (mode, x, op_false);
11689 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
11691 x = gen_rtx_IOR (mode, t3, t2);
11692 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
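/* The sequence above computes the classic bit-select idiom (shown here
   as an annotation):

	dest = (cmp & op_true) | (~cmp & op_false)

   which maps onto andps/andnps/orps on the actual vector registers.  */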
11696 /* Expand a floating-point conditional move. Return true if successful. */
11699 ix86_expand_fp_movcc (rtx operands[])
11701 enum machine_mode mode = GET_MODE (operands[0]);
11702 enum rtx_code code = GET_CODE (operands[1]);
11703 rtx tmp, compare_op, second_test, bypass_test;
11705 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
11707 enum machine_mode cmode;
11709 /* Since we have no cmove for sse registers, don't force bad register
11710 allocation just to gain access to it. Deny movcc when the
11711 comparison mode doesn't match the move mode. */
11712 cmode = GET_MODE (ix86_compare_op0);
11713 if (cmode == VOIDmode)
11714 cmode = GET_MODE (ix86_compare_op1);
11718 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11720 &ix86_compare_op1);
11721 if (code == UNKNOWN)
11724 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
11725 ix86_compare_op1, operands[2],
11729 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
11730 ix86_compare_op1, operands[2], operands[3]);
11731 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
11735 /* The floating point conditional move instructions don't directly
11736 support conditions resulting from a signed integer comparison. */
11738 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11743 if (!fcmov_comparison_operator (compare_op, VOIDmode))
11745 gcc_assert (!second_test && !bypass_test);
11746 tmp = gen_reg_rtx (QImode);
11747 ix86_expand_setcc (code, tmp);
11749 ix86_compare_op0 = tmp;
11750 ix86_compare_op1 = const0_rtx;
11751 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11753 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11755 tmp = gen_reg_rtx (mode);
11756 emit_move_insn (tmp, operands[3]);
11759 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11761 tmp = gen_reg_rtx (mode);
11762 emit_move_insn (tmp, operands[2]);
11766 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11767 gen_rtx_IF_THEN_ELSE (mode, compare_op,
11768 operands[2], operands[3])));
11770 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11771 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
11772 operands[3], operands[0])));
11774 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11775 gen_rtx_IF_THEN_ELSE (mode, second_test,
11776 operands[2], operands[0])));
11781 /* Expand a floating-point vector conditional move; a vcond operation
11782 rather than a movcc operation. */
11785 ix86_expand_fp_vcond (rtx operands[])
11787 enum rtx_code code = GET_CODE (operands[3]);
11790 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11791 &operands[4], &operands[5]);
11792 if (code == UNKNOWN)
11795 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
11796 operands[5], operands[1], operands[2]))
11799 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
11800 operands[1], operands[2]);
11801 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
11805 /* Expand an integral vector conditional move; unsigned compares are lowered to signed ones below. */
11808 ix86_expand_int_vcond (rtx operands[])
11810 enum machine_mode mode = GET_MODE (operands[0]);
11811 enum rtx_code code = GET_CODE (operands[3]);
11812 bool negate = false;
11815 cop0 = operands[4];
11816 cop1 = operands[5];
11818 /* Canonicalize the comparison to EQ, GT, GTU. */
11829 code = reverse_condition (code);
11835 code = reverse_condition (code);
11841 code = swap_condition (code);
11842 x = cop0, cop0 = cop1, cop1 = x;
11846 gcc_unreachable ();
11849 /* Unsigned parallel compares are not supported by the hardware. Play some
11850 tricks to turn this into a signed comparison against 0. */
11853 cop0 = force_reg (mode, cop0);
11861 /* Perform a parallel modulo subtraction. */
11862 t1 = gen_reg_rtx (mode);
11863 emit_insn (gen_subv4si3 (t1, cop0, cop1));
11865 /* Extract the original sign bit of op0. */
11866 mask = GEN_INT (-0x80000000);
11867 mask = gen_rtx_CONST_VECTOR (mode,
11868 gen_rtvec (4, mask, mask, mask, mask));
11869 mask = force_reg (mode, mask);
11870 t2 = gen_reg_rtx (mode);
11871 emit_insn (gen_andv4si3 (t2, cop0, mask));
11873 /* XOR it back into the result of the subtraction. This results
11874 in the sign bit being set iff we saw unsigned underflow. */
11875 x = gen_reg_rtx (mode);
11876 emit_insn (gen_xorv4si3 (x, t1, t2));
11884 /* Perform a parallel unsigned saturating subtraction. */
11885 x = gen_reg_rtx (mode);
11886 emit_insn (gen_rtx_SET (VOIDmode, x,
11887 gen_rtx_US_MINUS (mode, cop0, cop1)));
11894 gcc_unreachable ();
11898 cop1 = CONST0_RTX (mode);
11901 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
11902 operands[1+negate], operands[2-negate]);
11904 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
11905 operands[2-negate]);
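/* Annotation: e.g. an unsigned V8HImode "a > b" that reaches the
   saturating-subtract path above is computed as

	t = psubusw (a, b)	; t = max (a - b, 0)
	m = pcmpeqw (t, 0)	; m = (a <= b) ? ~0 : 0

   and the arms are selected through NEGATE, since m is the mask of the
   complementary condition.  */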
11909 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
11910 true if we should do zero extension, else sign extension. HIGH_P is
11911 true if we want the N/2 high elements, else the low elements. */
11914 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
11916 enum machine_mode imode = GET_MODE (operands[1]);
11917 rtx (*unpack)(rtx, rtx, rtx);
11924 unpack = gen_vec_interleave_highv16qi;
11926 unpack = gen_vec_interleave_lowv16qi;
11930 unpack = gen_vec_interleave_highv8hi;
11932 unpack = gen_vec_interleave_lowv8hi;
11936 unpack = gen_vec_interleave_highv4si;
11938 unpack = gen_vec_interleave_lowv4si;
11941 gcc_unreachable ();
11944 dest = gen_lowpart (imode, operands[0]);
11947 se = force_reg (imode, CONST0_RTX (imode));
11949 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
11950 operands[1], pc_rtx, pc_rtx);
11952 emit_insn (unpack (dest, operands[1], se));
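/* Annotation: e.g. for the low half of a V16QImode operand, the
   sign-extending variant works as

	se   = pcmpgtb (0, op1)	; ~0 where op1 is negative: a sign mask
	dest = punpcklbw (op1, se)	; pair each byte with its sign byte

   producing V8HImode values; the zero-extending variant interleaves
   with a zero register instead.  */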
11955 /* Expand a conditional increment or decrement using adc/sbb instructions.
11956 The default case using setcc followed by the conditional move can be
11957 done by generic code. */
11959 ix86_expand_int_addcc (rtx operands[])
11961 enum rtx_code code = GET_CODE (operands[1]);
11963 rtx val = const0_rtx;
11964 bool fpcmp = false;
11965 enum machine_mode mode = GET_MODE (operands[0]);
11967 if (operands[3] != const1_rtx
11968 && operands[3] != constm1_rtx)
11970 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11971 ix86_compare_op1, &compare_op))
11973 code = GET_CODE (compare_op);
11975 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11976 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11979 code = ix86_fp_compare_code_to_integer (code);
11986 PUT_CODE (compare_op,
11987 reverse_condition_maybe_unordered
11988 (GET_CODE (compare_op)));
11990 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11992 PUT_MODE (compare_op, mode);
11994 /* Construct either adc or sbb insn. */
11995 if ((code == LTU) == (operands[3] == constm1_rtx))
11997 switch (GET_MODE (operands[0]))
12000 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
12003 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
12006 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
12009 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12012 gcc_unreachable ();
12017 switch (GET_MODE (operands[0]))
12020 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
12023 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
12026 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
12029 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12032 gcc_unreachable ();
12035 return 1; /* DONE */
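/* Worked example (illustrative): "x = y + (a < b)" with unsigned SImode
   operands takes the adc branch above:

	cmpl	b, a		; CF = (a < b)
	adcl	$0, dest	; dest = y + CF  */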
12039 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
12040 works for floating-point parameters and non-offsettable memories.
12041 For pushes, it returns just stack offsets; the values will be saved
12042 in the right order. At most three parts are generated. */
12045 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
12050 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
12052 size = (GET_MODE_SIZE (mode) + 4) / 8;
12054 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
12055 gcc_assert (size >= 2 && size <= 3);
12057 /* Optimize constant pool references to immediates. This is used by fp
12058 moves, which force all constants to memory to allow combining. */
12059 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
12061 rtx tmp = maybe_get_pool_constant (operand);
12066 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
12068 /* The only non-offsettable memories we handle are pushes. */
12069 int ok = push_operand (operand, VOIDmode);
12073 operand = copy_rtx (operand);
12074 PUT_MODE (operand, Pmode);
12075 parts[0] = parts[1] = parts[2] = operand;
12079 if (GET_CODE (operand) == CONST_VECTOR)
12081 enum machine_mode imode = int_mode_for_mode (mode);
12082 /* Caution: if we looked through a constant pool memory above,
12083 the operand may actually have a different mode now. That's
12084 ok, since we want to pun this all the way back to an integer. */
12085 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
12086 gcc_assert (operand != NULL);
12092 if (mode == DImode)
12093 split_di (&operand, 1, &parts[0], &parts[1]);
12096 if (REG_P (operand))
12098 gcc_assert (reload_completed);
12099 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
12100 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
12102 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
12104 else if (offsettable_memref_p (operand))
12106 operand = adjust_address (operand, SImode, 0);
12107 parts[0] = operand;
12108 parts[1] = adjust_address (operand, SImode, 4);
12110 parts[2] = adjust_address (operand, SImode, 8);
12112 else if (GET_CODE (operand) == CONST_DOUBLE)
12117 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12121 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
12122 parts[2] = gen_int_mode (l[2], SImode);
12125 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
12128 gcc_unreachable ();
12130 parts[1] = gen_int_mode (l[1], SImode);
12131 parts[0] = gen_int_mode (l[0], SImode);
12134 gcc_unreachable ();
12139 if (mode == TImode)
12140 split_ti (&operand, 1, &parts[0], &parts[1]);
12141 if (mode == XFmode || mode == TFmode)
12143 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
12144 if (REG_P (operand))
12146 gcc_assert (reload_completed);
12147 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
12148 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
12150 else if (offsettable_memref_p (operand))
12152 operand = adjust_address (operand, DImode, 0);
12153 parts[0] = operand;
12154 parts[1] = adjust_address (operand, upper_mode, 8);
12156 else if (GET_CODE (operand) == CONST_DOUBLE)
12161 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12162 real_to_target (l, &r, mode);
12164 /* Do not use a shift by 32, to avoid warnings on 32-bit systems. */
12165 if (HOST_BITS_PER_WIDE_INT >= 64)
12168 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
12169 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
12172 parts[0] = immed_double_const (l[0], l[1], DImode);
12174 if (upper_mode == SImode)
12175 parts[1] = gen_int_mode (l[2], SImode);
12176 else if (HOST_BITS_PER_WIDE_INT >= 64)
12179 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
12180 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
12183 parts[1] = immed_double_const (l[2], l[3], DImode);
12186 gcc_unreachable ();
12193 /* Emit insns to perform a move or push of DI, DF, and XF values.
12194 Return false when normal moves are needed; true when all required
12195 insns have been emitted. Operands 2-4 contain the input values
12196 in the correct order; operands 5-7 contain the output values. */
12199 ix86_split_long_move (rtx operands[])
12204 int collisions = 0;
12205 enum machine_mode mode = GET_MODE (operands[0]);
12207 /* The DFmode expanders may ask us to move a double.
12208 For a 64-bit target this is a single move. By hiding that fact
12209 here we simplify the i386.md splitters. */
12210 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
12212 /* Optimize constant pool references to immediates. This is used by
12213 fp moves, which force all constants to memory to allow combining. */
12215 if (GET_CODE (operands[1]) == MEM
12216 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
12217 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
12218 operands[1] = get_pool_constant (XEXP (operands[1], 0));
12219 if (push_operand (operands[0], VOIDmode))
12221 operands[0] = copy_rtx (operands[0]);
12222 PUT_MODE (operands[0], Pmode);
12225 operands[0] = gen_lowpart (DImode, operands[0]);
12226 operands[1] = gen_lowpart (DImode, operands[1]);
12227 emit_move_insn (operands[0], operands[1]);
12231 /* The only non-offsettable memory we handle is a push. */
12232 if (push_operand (operands[0], VOIDmode))
12235 gcc_assert (GET_CODE (operands[0]) != MEM
12236 || offsettable_memref_p (operands[0]));
12238 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
12239 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
12241 /* When emitting a push, take care with source operands on the stack. */
12242 if (push && GET_CODE (operands[1]) == MEM
12243 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
12246 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
12247 XEXP (part[1][2], 0));
12248 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
12249 XEXP (part[1][1], 0));
12252 /* We need to do the copy in the right order in case an address register
12253 of the source overlaps the destination. */
12254 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
12256 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
12258 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12261 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
12264 /* A collision in the middle part can be handled by reordering. */
12265 if (collisions == 1 && nparts == 3
12266 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12269 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
12270 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
12273 /* If there are more collisions, we can't handle them by reordering.
12274 Do an lea to the last part and use only one colliding move. */
12275 else if (collisions > 1)
12281 base = part[0][nparts - 1];
12283 /* Handle the case when the last part isn't valid for lea.
12284 This happens in 64-bit mode when storing the 12-byte XFmode. */
12285 if (GET_MODE (base) != Pmode)
12286 base = gen_rtx_REG (Pmode, REGNO (base));
12288 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
12289 part[1][0] = replace_equiv_address (part[1][0], base);
12290 part[1][1] = replace_equiv_address (part[1][1],
12291 plus_constant (base, UNITS_PER_WORD));
12293 part[1][2] = replace_equiv_address (part[1][2],
12294 plus_constant (base, 8));
12304 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
12305 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
12306 emit_move_insn (part[0][2], part[1][2]);
12311 /* In 64-bit mode we don't have a 32-bit push available. If this is
12312 a register, that is OK: we just use the larger counterpart. We also
12313 retype memory; this comes from an attempt to avoid a REX prefix on
12314 moves of the second half of a TFmode value. */
12315 if (GET_MODE (part[1][1]) == SImode)
12317 switch (GET_CODE (part[1][1]))
12320 part[1][1] = adjust_address (part[1][1], DImode, 0);
12324 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
12328 gcc_unreachable ();
12331 if (GET_MODE (part[1][0]) == SImode)
12332 part[1][0] = part[1][1];
12335 emit_move_insn (part[0][1], part[1][1]);
12336 emit_move_insn (part[0][0], part[1][0]);
12340 /* Choose the correct order so as not to overwrite the source before it is copied. */
12341 if ((REG_P (part[0][0])
12342 && REG_P (part[1][1])
12343 && (REGNO (part[0][0]) == REGNO (part[1][1])
12345 && REGNO (part[0][0]) == REGNO (part[1][2]))))
12347 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
12351 operands[2] = part[0][2];
12352 operands[3] = part[0][1];
12353 operands[4] = part[0][0];
12354 operands[5] = part[1][2];
12355 operands[6] = part[1][1];
12356 operands[7] = part[1][0];
12360 operands[2] = part[0][1];
12361 operands[3] = part[0][0];
12362 operands[5] = part[1][1];
12363 operands[6] = part[1][0];
12370 operands[2] = part[0][0];
12371 operands[3] = part[0][1];
12372 operands[4] = part[0][2];
12373 operands[5] = part[1][0];
12374 operands[6] = part[1][1];
12375 operands[7] = part[1][2];
12379 operands[2] = part[0][0];
12380 operands[3] = part[0][1];
12381 operands[5] = part[1][0];
12382 operands[6] = part[1][1];
12386 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
12389 if (GET_CODE (operands[5]) == CONST_INT
12390 && operands[5] != const0_rtx
12391 && REG_P (operands[2]))
12393 if (GET_CODE (operands[6]) == CONST_INT
12394 && INTVAL (operands[6]) == INTVAL (operands[5]))
12395 operands[6] = operands[2];
12398 && GET_CODE (operands[7]) == CONST_INT
12399 && INTVAL (operands[7]) == INTVAL (operands[5]))
12400 operands[7] = operands[2];
12404 && GET_CODE (operands[6]) == CONST_INT
12405 && operands[6] != const0_rtx
12406 && REG_P (operands[3])
12407 && GET_CODE (operands[7]) == CONST_INT
12408 && INTVAL (operands[7]) == INTVAL (operands[6]))
12409 operands[7] = operands[3];
12412 emit_move_insn (operands[2], operands[5]);
12413 emit_move_insn (operands[3], operands[6]);
12415 emit_move_insn (operands[4], operands[7]);
12420 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
12421 left shift by a constant, either using a single shift or
12422 a sequence of add instructions. */
12425 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
12429 emit_insn ((mode == DImode
12431 : gen_adddi3) (operand, operand, operand));
12433 else if (!optimize_size
12434 && count * ix86_cost->add <= ix86_cost->shift_const)
12437 for (i = 0; i < count; i++)
12439 emit_insn ((mode == DImode
12441 : gen_adddi3) (operand, operand, operand));
12445 emit_insn ((mode == DImode
12447 : gen_ashldi3) (operand, operand, GEN_INT (count)));
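/* E.g. (illustrative): a left shift by 2 may become

	addl	operand, operand
	addl	operand, operand

   when two adds are no more costly than one constant shift; otherwise
   a single sall $2 is emitted.  */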
12451 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
12453 rtx low[2], high[2];
12455 const int single_width = mode == DImode ? 32 : 64;
12457 if (GET_CODE (operands[2]) == CONST_INT)
12459 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12460 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12462 if (count >= single_width)
12464 emit_move_insn (high[0], low[1]);
12465 emit_move_insn (low[0], const0_rtx);
12467 if (count > single_width)
12468 ix86_expand_ashl_const (high[0], count - single_width, mode);
12472 if (!rtx_equal_p (operands[0], operands[1]))
12473 emit_move_insn (operands[0], operands[1]);
12474 emit_insn ((mode == DImode
12476 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
12477 ix86_expand_ashl_const (low[0], count, mode);
12482 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12484 if (operands[1] == const1_rtx)
12486 /* Assuming we've chosen QImode-capable registers, 1 << N
12487 can be done with two 32/64-bit shifts, no branches, no cmoves. */
12488 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
12490 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
12492 ix86_expand_clear (low[0]);
12493 ix86_expand_clear (high[0]);
12494 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
12496 d = gen_lowpart (QImode, low[0]);
12497 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12498 s = gen_rtx_EQ (QImode, flags, const0_rtx);
12499 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12501 d = gen_lowpart (QImode, high[0]);
12502 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12503 s = gen_rtx_NE (QImode, flags, const0_rtx);
12504 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12507 /* Otherwise, we can get the same results by manually performing
12508 a bit extract operation on bit 5/6, and then performing the two
12509 shifts. The two methods of getting 0/1 into low/high are exactly
12510 the same size. Avoiding the shift in the bit extract case helps
12511 pentium4 a bit; no one else seems to care much either way. */
12516 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
12517 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
12519 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
12520 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
12522 emit_insn ((mode == DImode
12524 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
12525 emit_insn ((mode == DImode
12527 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
12528 emit_move_insn (low[0], high[0]);
12529 emit_insn ((mode == DImode
12531 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
12534 emit_insn ((mode == DImode
12536 : gen_ashldi3) (low[0], low[0], operands[2]));
12537 emit_insn ((mode == DImode
12539 : gen_ashldi3) (high[0], high[0], operands[2]));
12543 if (operands[1] == constm1_rtx)
12545 /* For -1 << N, we can avoid the shld instruction, because we
12546 know that we're shifting 0...31/63 ones into a -1. */
12547 emit_move_insn (low[0], constm1_rtx);
12549 emit_move_insn (high[0], low[0]);
12551 emit_move_insn (high[0], constm1_rtx);
12555 if (!rtx_equal_p (operands[0], operands[1]))
12556 emit_move_insn (operands[0], operands[1]);
12558 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12559 emit_insn ((mode == DImode
12561 : gen_x86_64_shld) (high[0], low[0], operands[2]));
12564 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
12566 if (TARGET_CMOVE && scratch)
12568 ix86_expand_clear (scratch);
12569 emit_insn ((mode == DImode
12570 ? gen_x86_shift_adj_1
12571 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
12574 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
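/* Annotation: the variable-count path above is the classic pair

	shld	%cl, low, high	; high = high << cl | low >> (width - cl)
	sall	%cl, low

   followed by an x86_shift_adj_* fixup for counts >= 32 (or 64), since
   the hardware only honors the low bits of the shift count.  */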
12578 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
12580 rtx low[2], high[2];
12582 const int single_width = mode == DImode ? 32 : 64;
12584 if (GET_CODE (operands[2]) == CONST_INT)
12586 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12587 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12589 if (count == single_width * 2 - 1)
12591 emit_move_insn (high[0], high[1]);
12592 emit_insn ((mode == DImode
12594 : gen_ashrdi3) (high[0], high[0],
12595 GEN_INT (single_width - 1)));
12596 emit_move_insn (low[0], high[0]);
12599 else if (count >= single_width)
12601 emit_move_insn (low[0], high[1]);
12602 emit_move_insn (high[0], low[0]);
12603 emit_insn ((mode == DImode
12605 : gen_ashrdi3) (high[0], high[0],
12606 GEN_INT (single_width - 1)));
12607 if (count > single_width)
12608 emit_insn ((mode == DImode
12610 : gen_ashrdi3) (low[0], low[0],
12611 GEN_INT (count - single_width)));
12615 if (!rtx_equal_p (operands[0], operands[1]))
12616 emit_move_insn (operands[0], operands[1]);
12617 emit_insn ((mode == DImode
12619 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12620 emit_insn ((mode == DImode
12622 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
12627 if (!rtx_equal_p (operands[0], operands[1]))
12628 emit_move_insn (operands[0], operands[1]);
12630 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12632 emit_insn ((mode == DImode
12634 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12635 emit_insn ((mode == DImode
12637 : gen_ashrdi3) (high[0], high[0], operands[2]));
12639 if (TARGET_CMOVE && scratch)
12641 emit_move_insn (scratch, high[0]);
12642 emit_insn ((mode == DImode
12644 : gen_ashrdi3) (scratch, scratch,
12645 GEN_INT (single_width - 1)));
12646 emit_insn ((mode == DImode
12647 ? gen_x86_shift_adj_1
12648 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12652 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
12657 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
12659 rtx low[2], high[2];
12661 const int single_width = mode == DImode ? 32 : 64;
12663 if (GET_CODE (operands[2]) == CONST_INT)
12665 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12666 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12668 if (count >= single_width)
12670 emit_move_insn (low[0], high[1]);
12671 ix86_expand_clear (high[0]);
12673 if (count > single_width)
12674 emit_insn ((mode == DImode
12676 : gen_lshrdi3) (low[0], low[0],
12677 GEN_INT (count - single_width)));
12681 if (!rtx_equal_p (operands[0], operands[1]))
12682 emit_move_insn (operands[0], operands[1]);
12683 emit_insn ((mode == DImode
12685 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12686 emit_insn ((mode == DImode
12688 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
12693 if (!rtx_equal_p (operands[0], operands[1]))
12694 emit_move_insn (operands[0], operands[1]);
12696 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12698 emit_insn ((mode == DImode
12700 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12701 emit_insn ((mode == DImode
12703 : gen_lshrdi3) (high[0], high[0], operands[2]));
12705 /* Heh. By reversing the arguments, we can reuse this pattern. */
12706 if (TARGET_CMOVE && scratch)
12708 ix86_expand_clear (scratch);
12709 emit_insn ((mode == DImode
12710 ? gen_x86_shift_adj_1
12711 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12715 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
12719 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
12721 predict_jump (int prob)
12723 rtx insn = get_last_insn ();
12724 gcc_assert (GET_CODE (insn) == JUMP_INSN);
12726 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12731 /* Helper function for the string operations below. Test VARIABLE for
12732 whether it is aligned to VALUE bytes. If true, jump to the label. */
12734 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
12736 rtx label = gen_label_rtx ();
12737 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
12738 if (GET_MODE (variable) == DImode)
12739 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
12741 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
12742 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
12745 predict_jump (REG_BR_PROB_BASE * 50 / 100);
12747 predict_jump (REG_BR_PROB_BASE * 90 / 100);
12751 /* Adjust COUNTREG by VALUE. */
12753 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
12755 if (GET_MODE (countreg) == DImode)
12756 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
12758 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
12761 /* Zero-extend the possibly-SImode EXP to a Pmode register. */
12763 ix86_zero_extend_to_Pmode (rtx exp)
12766 if (GET_MODE (exp) == VOIDmode)
12767 return force_reg (Pmode, exp);
12768 if (GET_MODE (exp) == Pmode)
12769 return copy_to_mode_reg (Pmode, exp);
12770 r = gen_reg_rtx (Pmode);
12771 emit_insn (gen_zero_extendsidi2 (r, exp));
12775 /* Divide COUNTREG by SCALE. */
12777 scale_counter (rtx countreg, int scale)
12780 rtx piece_size_mask;
12784 if (GET_CODE (countreg) == CONST_INT)
12785 return GEN_INT (INTVAL (countreg) / scale);
12786 gcc_assert (REG_P (countreg));
12788 piece_size_mask = GEN_INT (scale - 1);
12789 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
12790 GEN_INT (exact_log2 (scale)),
12791 NULL, 1, OPTAB_DIRECT);
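/* E.g. (illustrative): scaling a byte count for an SImode "rep movs"
   divides by 4, emitted as "shrl $2, count" when COUNT is not a
   compile-time constant.  */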
12795 /* When SRCPTR is non-NULL, output a simple loop to move memory
12796 from SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
12797 the overall size is COUNT, specified in bytes. When SRCPTR is NULL,
12798 output the equivalent loop to set memory to VALUE (assumed to be in MODE).
12800 The size is rounded down to a whole number of chunks moved at once.
12801 SRCMEM and DESTMEM provide the MEM rtx to feed proper aliasing info. */
12805 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
12806 rtx destptr, rtx srcptr, rtx value,
12807 rtx count, enum machine_mode mode, int unroll,
12810 rtx out_label, top_label, iter, tmp;
12811 enum machine_mode iter_mode;
12812 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
12813 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
12819 iter_mode = GET_MODE (count);
12820 if (iter_mode == VOIDmode)
12821 iter_mode = word_mode;
12823 top_label = gen_label_rtx ();
12824 out_label = gen_label_rtx ();
12825 iter = gen_reg_rtx (iter_mode);
12827 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
12828 NULL, 1, OPTAB_DIRECT);
12829 /* Those two should combine. */
12830 if (piece_size == const1_rtx)
12832 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
12834 predict_jump (REG_BR_PROB_BASE * 10 / 100);
12836 emit_move_insn (iter, const0_rtx);
12838 emit_label (top_label);
12840 tmp = convert_modes (Pmode, iter_mode, iter, true);
12841 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
12842 destmem = change_address (destmem, mode, x_addr);
12846 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
12847 srcmem = change_address (srcmem, mode, y_addr);
12849 /* When unrolling for chips that reorder memory reads and writes,
12850 we can save registers by using a single temporary.
12851 Also, using 4 temporaries is overkill in 32-bit mode. */
12852 if (!TARGET_64BIT && 0)
12854 for (i = 0; i < unroll; i++)
12859 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12861 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12863 emit_move_insn (destmem, srcmem);
12869 gcc_assert (unroll <= 4);
12870 for (i = 0; i < unroll; i++)
12872 tmpreg[i] = gen_reg_rtx (mode);
12876 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12878 emit_move_insn (tmpreg[i], srcmem);
12880 for (i = 0; i < unroll; i++)
12885 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12887 emit_move_insn (destmem, tmpreg[i]);
12892 for (i = 0; i < unroll; i++)
12896 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12897 emit_move_insn (destmem, value);
12900 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
12901 true, OPTAB_LIB_WIDEN);
12903 emit_move_insn (iter, tmp);
12905 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
12907 if (expected_size != -1)
12909 expected_size /= GET_MODE_SIZE (mode) * unroll;
12910 if (expected_size == 0)
12912 else if (expected_size > REG_BR_PROB_BASE)
12913 predict_jump (REG_BR_PROB_BASE - 1);
12915 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
12918 predict_jump (REG_BR_PROB_BASE * 80 / 100);
12919 iter = ix86_zero_extend_to_Pmode (iter);
12920 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
12921 true, OPTAB_LIB_WIDEN);
12922 if (tmp != destptr)
12923 emit_move_insn (destptr, tmp);
12926 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
12927 true, OPTAB_LIB_WIDEN);
12929 emit_move_insn (srcptr, tmp);
12931 emit_label (out_label);
12934 /* Output "rep; mov" instruction.
12935 Arguments have same meaning as for previous function */
12937 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
12938 rtx destptr, rtx srcptr,
12940 enum machine_mode mode)
12946 /* If the size is known, it is shorter to use rep movs. */
12947 if (mode == QImode && GET_CODE (count) == CONST_INT
12948 && !(INTVAL (count) & 3))
12951 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12952 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12953 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
12954 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
12955 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
12956 if (mode != QImode)
12958 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12959 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12960 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
12961 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
12962 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12963 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
12967 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
12968 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
12970 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
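/* Illustrative output (annotation): for SImode chunks this becomes
   "rep movsd" with the scaled count in %ecx, source in %esi and
   destination in %edi; DESTEXP/SRCEXP describe the final pointer
   values for the RTL.  */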
12974 /* Output "rep; stos" instruction.
12975 Arguments have same meaning as for previous function */
12977 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
12979 enum machine_mode mode)
12984 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12985 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12986 value = force_reg (mode, gen_lowpart (mode, value));
12987 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
12988 if (mode != QImode)
12990 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12991 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12992 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
12995 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
12996 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
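/* Illustrative output (annotation): for SImode this is "rep stosd",
   storing the value broadcast in %eax four bytes at a time to %edi,
   %ecx times.  */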
13000 emit_strmov (rtx destmem, rtx srcmem,
13001 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
13003 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
13004 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
13005 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13008 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
13010 expand_movmem_epilogue (rtx destmem, rtx srcmem,
13011 rtx destptr, rtx srcptr, rtx count, int max_size)
13014 if (GET_CODE (count) == CONST_INT)
13016 HOST_WIDE_INT countval = INTVAL (count);
13019 if ((countval & 0x16) && max_size > 16)
13023 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13024 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
13027 gcc_unreachable ();
13030 if ((countval & 0x08) && max_size > 8)
13033 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13036 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13037 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 4);
13041 if ((countval & 0x04) && max_size > 4)
13043 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
13046 if ((countval & 0x02) && max_size > 2)
13048 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
13051 if ((countval & 0x01) && max_size > 1)
13053 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
13060 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13061 count, 1, OPTAB_DIRECT);
13062 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
13063 count, QImode, 1, 4);
13067 /* When single string operations are available, we can cheaply advance the
13068 dest and src pointers. Otherwise we save code size by maintaining an offset
13069 (zero is readily available from the preceding rep operation) and using x86 addressing modes. */
13071 if (TARGET_SINGLE_STRINGOP)
13075 rtx label = ix86_expand_aligntest (count, 4, true);
13076 src = change_address (srcmem, SImode, srcptr);
13077 dest = change_address (destmem, SImode, destptr);
13078 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13079 emit_label (label);
13080 LABEL_NUSES (label) = 1;
13084 rtx label = ix86_expand_aligntest (count, 2, true);
13085 src = change_address (srcmem, HImode, srcptr);
13086 dest = change_address (destmem, HImode, destptr);
13087 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13088 emit_label (label);
13089 LABEL_NUSES (label) = 1;
13093 rtx label = ix86_expand_aligntest (count, 1, true);
13094 src = change_address (srcmem, QImode, srcptr);
13095 dest = change_address (destmem, QImode, destptr);
13096 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13097 emit_label (label);
13098 LABEL_NUSES (label) = 1;
13103 rtx offset = force_reg (Pmode, const0_rtx);
13108 rtx label = ix86_expand_aligntest (count, 4, true);
13109 src = change_address (srcmem, SImode, srcptr);
13110 dest = change_address (destmem, SImode, destptr);
13111 emit_move_insn (dest, src);
13112 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
13113 true, OPTAB_LIB_WIDEN);
13115 emit_move_insn (offset, tmp);
13116 emit_label (label);
13117 LABEL_NUSES (label) = 1;
13121 rtx label = ix86_expand_aligntest (count, 2, true);
13122 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13123 src = change_address (srcmem, HImode, tmp);
13124 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13125 dest = change_address (destmem, HImode, tmp);
13126 emit_move_insn (dest, src);
13127 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
13128 true, OPTAB_LIB_WIDEN);
13130 emit_move_insn (offset, tmp);
13131 emit_label (label);
13132 LABEL_NUSES (label) = 1;
13136 rtx label = ix86_expand_aligntest (count, 1, true);
13137 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13138 src = change_address (srcmem, QImode, tmp);
13139 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13140 dest = change_address (destmem, QImode, tmp);
13141 emit_move_insn (dest, src);
13142 emit_label (label);
13143 LABEL_NUSES (label) = 1;
13148 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13150 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
13151 rtx count, int max_size)
13154 expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13155 count, 1, OPTAB_DIRECT);
13156 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
13157 gen_lowpart (QImode, value), count, QImode,
13161 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
13163 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
13167 if (GET_CODE (count) == CONST_INT)
13169 HOST_WIDE_INT countval = INTVAL (count);
13172 if ((countval & 0x16) && max_size > 16)
13176 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13177 emit_insn (gen_strset (destptr, dest, value));
13178 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
13179 emit_insn (gen_strset (destptr, dest, value));
13182 gcc_unreachable ();
13185 if ((countval & 0x08) && max_size > 8)
13189 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13190 emit_insn (gen_strset (destptr, dest, value));
13194 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13195 emit_insn (gen_strset (destptr, dest, value));
13196 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
13197 emit_insn (gen_strset (destptr, dest, value));
13201 if ((countval & 0x04) && max_size > 4)
13203 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13204 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13207 if ((countval & 0x02) && max_size > 2)
13209 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
13210 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13213 if ((countval & 0x01) && max_size > 1)
13215 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
13216 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13223 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
13228 rtx label = ix86_expand_aligntest (count, 16, true);
13231 dest = change_address (destmem, DImode, destptr);
13232 emit_insn (gen_strset (destptr, dest, value));
13233 emit_insn (gen_strset (destptr, dest, value));
13237 dest = change_address (destmem, SImode, destptr);
13238 emit_insn (gen_strset (destptr, dest, value));
13239 emit_insn (gen_strset (destptr, dest, value));
13240 emit_insn (gen_strset (destptr, dest, value));
13241 emit_insn (gen_strset (destptr, dest, value));
13243 emit_label (label);
13244 LABEL_NUSES (label) = 1;
13248 rtx label = ix86_expand_aligntest (count, 8, true);
13251 dest = change_address (destmem, DImode, destptr);
13252 emit_insn (gen_strset (destptr, dest, value));
13256 dest = change_address (destmem, SImode, destptr);
13257 emit_insn (gen_strset (destptr, dest, value));
13258 emit_insn (gen_strset (destptr, dest, value));
13260 emit_label (label);
13261 LABEL_NUSES (label) = 1;
13265 rtx label = ix86_expand_aligntest (count, 4, true);
13266 dest = change_address (destmem, SImode, destptr);
13267 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13268 emit_label (label);
13269 LABEL_NUSES (label) = 1;
13273 rtx label = ix86_expand_aligntest (count, 2, true);
13274 dest = change_address (destmem, HImode, destptr);
13275 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13276 emit_label (label);
13277 LABEL_NUSES (label) = 1;
13281 rtx label = ix86_expand_aligntest (count, 1, true);
13282 dest = change_address (destmem, QImode, destptr);
13283 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13284 emit_label (label);
13285 LABEL_NUSES (label) = 1;
/* Copy enough from SRC to DEST to align DEST, known to be aligned to ALIGN,
   to DESIRED_ALIGNMENT.  */
static void
expand_movmem_prologue (rtx destmem, rtx srcmem,
			rtx destptr, rtx srcptr, rtx count,
			int align, int desired_alignment)
  if (align <= 1 && desired_alignment > 1)
      rtx label = ix86_expand_aligntest (destptr, 1, false);
      srcmem = change_address (srcmem, QImode, srcptr);
      destmem = change_address (destmem, QImode, destptr);
      emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
      ix86_adjust_counter (count, 1);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  if (align <= 2 && desired_alignment > 2)
      rtx label = ix86_expand_aligntest (destptr, 2, false);
      srcmem = change_address (srcmem, HImode, srcptr);
      destmem = change_address (destmem, HImode, destptr);
      emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
      ix86_adjust_counter (count, 2);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  if (align <= 4 && desired_alignment > 4)
      rtx label = ix86_expand_aligntest (destptr, 4, false);
      srcmem = change_address (srcmem, SImode, srcptr);
      destmem = change_address (destmem, SImode, destptr);
      emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
      ix86_adjust_counter (count, 4);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  gcc_assert (desired_alignment <= 8);
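
/* A standalone, illustrative equivalent (not part of the compiler) of the
   alignment prologue above: conditionally copy 1, 2 and 4 bytes until DST
   reaches the desired alignment (at most 8), adjusting the count.  */
#if 0
#include <stdint.h>
#include <string.h>

static void
align_dst (char **dst, const char **src, unsigned int *count, int desired)
{
  if (desired > 1 && ((uintptr_t) *dst & 1))
    {
      *(*dst)++ = *(*src)++;	/* QImode move */
      (*count)--;
    }
  if (desired > 2 && ((uintptr_t) *dst & 2))
    {
      memcpy (*dst, *src, 2);	/* HImode move */
      *dst += 2;
      *src += 2;
      *count -= 2;
    }
  if (desired > 4 && ((uintptr_t) *dst & 4))
    {
      memcpy (*dst, *src, 4);	/* SImode move */
      *dst += 4;
      *src += 4;
      *count -= 4;
    }
}
#endif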
/* Set enough bytes of DEST to align DEST, known to be aligned to ALIGN,
   to DESIRED_ALIGNMENT.  */
static void
expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
			int align, int desired_alignment)
  if (align <= 1 && desired_alignment > 1)
      rtx label = ix86_expand_aligntest (destptr, 1, false);
      destmem = change_address (destmem, QImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
      ix86_adjust_counter (count, 1);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  if (align <= 2 && desired_alignment > 2)
      rtx label = ix86_expand_aligntest (destptr, 2, false);
      destmem = change_address (destmem, HImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
      ix86_adjust_counter (count, 2);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  if (align <= 4 && desired_alignment > 4)
      rtx label = ix86_expand_aligntest (destptr, 4, false);
      destmem = change_address (destmem, SImode, destptr);
      emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
      ix86_adjust_counter (count, 4);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  gcc_assert (desired_alignment <= 8);

/* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation.  */
static enum stringop_alg
decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
	    int *dynamic_check)
  const struct stringop_algs *algs;

  *dynamic_check = -1;
    algs = &ix86_cost->memset[TARGET_64BIT != 0];
    algs = &ix86_cost->memcpy[TARGET_64BIT != 0];
  if (stringop_alg != no_stringop)
    return stringop_alg;
  /* rep; movq or rep; movl is the smallest variant.  */
  else if (optimize_size)
      if (!count || (count & 3))
	return rep_prefix_1_byte;
	return rep_prefix_4_byte;
  /* Very tiny blocks are best handled via the loop; REP is expensive to
     set up.  */
  else if (expected_size != -1 && expected_size < 4)
    return loop_1_byte;
  else if (expected_size != -1)
      enum stringop_alg alg = libcall;
      for (i = 0; i < MAX_STRINGOP_ALGS; i++)
	  gcc_assert (algs->size[i].max);
	  if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
	      if (algs->size[i].alg != libcall)
		alg = algs->size[i].alg;
	      /* Honor TARGET_INLINE_ALL_STRINGOPS by picking the
		 last non-libcall inline algorithm.  */
	      if (TARGET_INLINE_ALL_STRINGOPS)
		  /* When the current size is best copied by a libcall,
		     but we are still forced to inline, run the heuristic
		     below that picks code for medium-sized blocks.  */
		  if (alg != libcall)
		return algs->size[i].alg;
      gcc_assert (TARGET_INLINE_ALL_STRINGOPS);
  /* When asked to inline the call anyway, try to pick a meaningful choice.
     We look for the maximal size of a block that is faster to copy by hand
     and take blocks of at most that size, guessing that the average size
     will be roughly half of the block.

     If this turns out to be bad, we might simply specify the preferred
     choice in ix86_costs.  */
  if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
      && algs->unknown_size == libcall)
      enum stringop_alg alg;

      for (i = 0; i < MAX_STRINGOP_ALGS; i++)
	if (algs->size[i].alg != libcall && algs->size[i].alg)
	  max = algs->size[i].max;
      alg = decide_alg (count, max / 2, memset, dynamic_check);
      gcc_assert (*dynamic_check == -1);
      gcc_assert (alg != libcall);
      if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
	*dynamic_check = max;
  return algs->unknown_size;
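
/* A simplified, standalone sketch (not part of the compiler) of the size
   table walk decide_alg performs above; `stringop_entry', `pick_alg' and
   the encoding of libcall as 0 are made up for illustration.  */
#if 0
struct stringop_entry
{
  int max;	/* largest block size this entry covers; -1 = unbounded */
  int alg;	/* chosen algorithm; 0 stands for libcall */
};

static int
pick_alg (const struct stringop_entry *table, int n, int expected_size,
	  int inline_all)
{
  int i, last_inline = 0;
  for (i = 0; i < n; i++)
    if (table[i].max >= expected_size || table[i].max == -1)
      {
	/* Remember the last non-libcall algorithm seen so far.  */
	if (table[i].alg != 0)
	  last_inline = table[i].alg;
	/* Without forced inlining, the first covering entry decides.  */
	if (!inline_all)
	  return table[i].alg;
	/* Forced inlining: take the last non-libcall choice, if any.  */
	if (last_inline != 0)
	  return last_inline;
      }
  return last_inline;
}
#endif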
/* Decide on alignment.  We know that the operand is already aligned to ALIGN
   (ALIGN can be based on profile feedback and thus it is not 100% guaranteed).  */
static int
decide_alignment (int align,
		  enum stringop_alg alg,
		  int expected_size)
  int desired_align = 0;
      gcc_unreachable ();
    case unrolled_loop:
      desired_align = GET_MODE_SIZE (Pmode);
    case rep_prefix_8_byte:
    case rep_prefix_4_byte:
      /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
	 copying a whole cache line at once.  */
      if (TARGET_PENTIUMPRO)
    case rep_prefix_1_byte:
      /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
	 copying a whole cache line at once.  */
      if (TARGET_PENTIUMPRO)
  if (desired_align < align)
    desired_align = align;
  if (expected_size != -1 && expected_size < 4)
    desired_align = align;
  return desired_align;

/* Return the smallest power of 2 greater than VAL.  */
static int
smallest_pow2_greater_than (int val)
{
  int ret = 1;
  while (ret <= val)
    ret <<= 1;
  return ret;
}

/* Expand string move (memcpy) operation.  Use i386 string operations when
   profitable.  ix86_expand_setmem contains similar code.  The code depends
   upon architecture, block size and alignment, but always has the same
   overall structure:

   1) Prologue guard: a conditional that jumps up to the epilogues for small
      blocks that can be handled by the epilogue alone.  This is faster, but
      also needed for correctness, since the prologue assumes the block is
      larger than the desired alignment.

      An optional dynamic check for size and a libcall for large blocks is
      emitted here too, with -minline-stringops-dynamically.

   2) Prologue: copy the first few bytes in order to get the destination
      aligned to DESIRED_ALIGN.  It is emitted only when ALIGN is less than
      DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
      We emit either a jump tree on power-of-two sized blocks, or a byte loop.

   3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
      with the specified algorithm.

   4) Epilogue: code copying the tail of the block that is too small to be
      handled by the main body (or up to size guarded by the prologue
      guard).  */
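
/* A minimal, standalone sketch (not part of the compiler) of the code shape
   described above, for a 4-byte-aligned SImode main loop; the helper name
   and the constant are illustrative only.  */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define EPILOGUE_SIZE_NEEDED 4	/* smallest_pow2_greater_than (size_needed - 1) */

static void
sketch_memcpy (char *dst, const char *src, size_t count)
{
  /* 1) Prologue guard: small blocks go straight to the epilogue.  */
  if (count < EPILOGUE_SIZE_NEEDED)
    goto epilogue;
  /* 2) Alignment prologue: copy a few bytes until DST is 4-byte aligned.  */
  while ((uintptr_t) dst & 3)
    {
      *dst++ = *src++;
      count--;
    }
  /* 3) Main body: copy in SIZE_NEEDED (here 4-byte) chunks.  */
  while (count >= 4)
    {
      memcpy (dst, src, 4);	/* one SImode move */
      dst += 4;
      src += 4;
      count -= 4;
    }
 epilogue:
  /* 4) Epilogue: copy the remaining count & (EPILOGUE_SIZE_NEEDED - 1)
     bytes.  */
  while (count--)
    *dst++ = *src++;
}
#endif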
int
ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
		    rtx expected_align_exp, rtx expected_size_exp)
  rtx jump_around_label = NULL;
  HOST_WIDE_INT align = 1;
  unsigned HOST_WIDE_INT count = 0;
  HOST_WIDE_INT expected_size = -1;
  int size_needed = 0, epilogue_size_needed;
  int desired_align = 0;
  enum stringop_alg alg;

  if (GET_CODE (align_exp) == CONST_INT)
    align = INTVAL (align_exp);
  /* i386 can do misaligned access at reasonably increased cost.  */
  if (GET_CODE (expected_align_exp) == CONST_INT
      && INTVAL (expected_align_exp) > align)
    align = INTVAL (expected_align_exp);
  if (GET_CODE (count_exp) == CONST_INT)
    count = expected_size = INTVAL (count_exp);
  if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
    expected_size = INTVAL (expected_size_exp);

  /* Step 0: Decide on preferred algorithm, desired alignment and
     size of chunks to be copied by main loop.  */

  alg = decide_alg (count, expected_size, false, &dynamic_check);
  desired_align = decide_alignment (align, alg, expected_size);

  if (!TARGET_ALIGN_STRINGOPS)
    align = desired_align;

  if (alg == libcall)
  gcc_assert (alg != no_stringop);
    count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
  destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
  srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
      gcc_unreachable ();
      size_needed = GET_MODE_SIZE (Pmode);
    case unrolled_loop:
      size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
    case rep_prefix_8_byte:
    case rep_prefix_4_byte:
    case rep_prefix_1_byte:
  epilogue_size_needed = size_needed;

  /* Step 1: Prologue guard.  */

  /* Alignment code needs count to be in register.  */
  if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
      enum machine_mode mode = SImode;
      if (TARGET_64BIT && (count & ~0xffffffff))
      count_exp = force_reg (mode, count_exp);
  gcc_assert (desired_align >= 1 && align >= 1);

  /* Ensure that the alignment prologue won't copy past the end of the
     block.  */
  if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
      epilogue_size_needed = MAX (size_needed - 1, desired_align - align);

      /* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
	 Make sure it is a power of 2.  */
      epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);

      label = gen_label_rtx ();
      emit_cmp_and_jump_insns (count_exp,
			       GEN_INT (epilogue_size_needed),
			       LTU, 0, GET_MODE (count_exp), 1, label);
      if (expected_size == -1 || expected_size < epilogue_size_needed)
	predict_jump (REG_BR_PROB_BASE * 60 / 100);
	predict_jump (REG_BR_PROB_BASE * 20 / 100);

  /* Emit code to decide at runtime whether a library call or inline code
     should be used.  */
  if (dynamic_check != -1)
      rtx hot_label = gen_label_rtx ();
      jump_around_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
			       LEU, 0, GET_MODE (count_exp), 1, hot_label);
      predict_jump (REG_BR_PROB_BASE * 90 / 100);
      emit_block_move_via_libcall (dst, src, count_exp, false);
      emit_jump (jump_around_label);
      emit_label (hot_label);

  /* Step 2: Alignment prologue.  */

  if (desired_align > align)
      /* Except for the first move in epilogue, we no longer know the
	 constant offset in aliasing info.  It doesn't seem worth the pain
	 to maintain it for the first move, so throw away the info early.  */
      src = change_address (src, BLKmode, srcreg);
      dst = change_address (dst, BLKmode, destreg);
      expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
  if (label && size_needed == 1)
      emit_label (label);
      LABEL_NUSES (label) = 1;

  /* Step 3: Main loop.  */

      gcc_unreachable ();
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
				     count_exp, QImode, 1, expected_size);
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
				     count_exp, Pmode, 1, expected_size);
    case unrolled_loop:
      /* Unroll only by a factor of 2 in 32-bit mode, since we don't have
	 enough registers for 4 temporaries anyway.  */
      expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
				     count_exp, Pmode, TARGET_64BIT ? 4 : 2,
    case rep_prefix_8_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
    case rep_prefix_4_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
    case rep_prefix_1_byte:
      expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
  /* Properly adjust the offsets of the src and dest memory for aliasing.  */
  if (GET_CODE (count_exp) == CONST_INT)
      src = adjust_automodify_address_nv (src, BLKmode, srcreg,
					  (count / size_needed) * size_needed);
      dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
					  (count / size_needed) * size_needed);
      src = change_address (src, BLKmode, srcreg);
      dst = change_address (dst, BLKmode, destreg);

  /* Step 4: Epilogue to copy the remaining bytes.  */

      /* When the main loop is done, COUNT_EXP might hold the original count,
	 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
	 Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
	 bytes.  Compensate if needed.  */
      if (size_needed < epilogue_size_needed)
	  expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
			       GEN_INT (size_needed - 1), count_exp, 1,
	  if (tmp != count_exp)
	    emit_move_insn (count_exp, tmp);
      emit_label (label);
      LABEL_NUSES (label) = 1;

  if (count_exp != const0_rtx && epilogue_size_needed > 1)
    expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
			    epilogue_size_needed);
  if (jump_around_label)
    emit_label (jump_around_label);

/* Helper function for memset.  For QImode value 0xXY produce
   0xXYXYXYXY of the width specified by MODE.  This is essentially
   a multiplication by 0x01010101, but we can do slightly better than
   synth_mult by unwinding the sequence by hand on CPUs with
   slow multiply.  */
static rtx
promote_duplicated_reg (enum machine_mode mode, rtx val)
  enum machine_mode valmode = GET_MODE (val);
  int nops = mode == DImode ? 3 : 2;

  gcc_assert (mode == SImode || mode == DImode);
  if (val == const0_rtx)
    return copy_to_mode_reg (mode, const0_rtx);
  if (GET_CODE (val) == CONST_INT)
      HOST_WIDE_INT v = INTVAL (val) & 255;
      if (mode == DImode)
	v |= (v << 16) << 16;
      return copy_to_mode_reg (mode, gen_int_mode (v, mode));
  if (valmode == VOIDmode)
  if (valmode != QImode)
    val = gen_lowpart (QImode, val);
  if (mode == QImode)
  if (!TARGET_PARTIAL_REG_STALL)
  if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
      + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
      <= (ix86_cost->shift_const + ix86_cost->add) * nops
	 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
      rtx reg = convert_modes (mode, QImode, val, true);
      tmp = promote_duplicated_reg (mode, const1_rtx);
      return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
      rtx reg = convert_modes (mode, QImode, val, true);

      if (!TARGET_PARTIAL_REG_STALL)
	if (mode == SImode)
	  emit_insn (gen_movsi_insv_1 (reg, reg));
	  emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
	  tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
				     NULL, 1, OPTAB_DIRECT);
	    expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
      tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
				 NULL, 1, OPTAB_DIRECT);
      reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
      if (mode == SImode)
	  tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
				     NULL, 1, OPTAB_DIRECT);
	  reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
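
/* A standalone, illustrative equivalent (not part of the compiler) of the
   shift/or unwinding performed above, for the DImode case.  */
#if 0
#include <stdint.h>

static uint64_t
duplicate_byte (uint8_t v)
{
  uint64_t x = v;
  x |= x << 8;			/* 0x00XY     -> 0xXYXY */
  x |= x << 16;			/* 0xXYXY     -> 0xXYXYXYXY (SImode result) */
  x |= x << 32;			/* 0xXYXYXYXY -> 0xXYXYXYXYXYXYXYXY */
  return x;
}
#endif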
/* Duplicate value VAL using promote_duplicated_reg into the maximal size
   that will be needed by the main loop copying SIZE_NEEDED chunks and by
   the prologue getting alignment from ALIGN to DESIRED_ALIGN.  */
static rtx
promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
      && (size_needed > 4 || (desired_align > align && desired_align > 4)))
    promoted_val = promote_duplicated_reg (DImode, val);
  else if (size_needed > 2 || (desired_align > align && desired_align > 2))
    promoted_val = promote_duplicated_reg (SImode, val);
  else if (size_needed > 1 || (desired_align > align && desired_align > 1))
    promoted_val = promote_duplicated_reg (HImode, val);
    promoted_val = val;

  return promoted_val;

/* Expand string clear operation (memset).  Use i386 string operations when
   profitable.  See the ix86_expand_movmem comment for an explanation of the
   individual steps performed.  */
int
ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
		    rtx expected_align_exp, rtx expected_size_exp)
  rtx jump_around_label = NULL;
  HOST_WIDE_INT align = 1;
  unsigned HOST_WIDE_INT count = 0;
  HOST_WIDE_INT expected_size = -1;
  int size_needed = 0, epilogue_size_needed;
  int desired_align = 0;
  enum stringop_alg alg;
  rtx promoted_val = NULL;
  bool force_loopy_epilogue = false;

  if (GET_CODE (align_exp) == CONST_INT)
    align = INTVAL (align_exp);
  /* i386 can do misaligned access at reasonably increased cost.  */
  if (GET_CODE (expected_align_exp) == CONST_INT
      && INTVAL (expected_align_exp) > align)
    align = INTVAL (expected_align_exp);
  if (GET_CODE (count_exp) == CONST_INT)
    count = expected_size = INTVAL (count_exp);
  if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
    expected_size = INTVAL (expected_size_exp);

  /* Step 0: Decide on preferred algorithm, desired alignment and
     size of chunks to be copied by main loop.  */

  alg = decide_alg (count, expected_size, true, &dynamic_check);
  desired_align = decide_alignment (align, alg, expected_size);

  if (!TARGET_ALIGN_STRINGOPS)
    align = desired_align;

  if (alg == libcall)
  gcc_assert (alg != no_stringop);
    count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
  destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
      gcc_unreachable ();
      size_needed = GET_MODE_SIZE (Pmode);
    case unrolled_loop:
      size_needed = GET_MODE_SIZE (Pmode) * 4;
    case rep_prefix_8_byte:
    case rep_prefix_4_byte:
    case rep_prefix_1_byte:
  epilogue_size_needed = size_needed;

  /* Step 1: Prologue guard.  */

  /* Alignment code needs count to be in register.  */
  if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
      enum machine_mode mode = SImode;
      if (TARGET_64BIT && (count & ~0xffffffff))
      count_exp = force_reg (mode, count_exp);
  /* Do the cheap promotion to allow better CSE across the
     main loop and epilogue (i.e., one load of the big constant in
     front of all code).  */
  if (GET_CODE (val_exp) == CONST_INT)
    promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
						   desired_align, align);
  /* Ensure that the alignment prologue won't copy past the end of the
     block.  */
  if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
      epilogue_size_needed = MAX (size_needed - 1, desired_align - align);

      /* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
	 Make sure it is a power of 2.  */
      epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);

      /* To improve performance of small blocks, we jump around the VAL
	 promoting mode.  This means that if the promoted VAL is not
	 constant, we might not use it in the epilogue and have to use the
	 byte loop variant.  */
      if (epilogue_size_needed > 2 && !promoted_val)
	force_loopy_epilogue = true;
      label = gen_label_rtx ();
      emit_cmp_and_jump_insns (count_exp,
			       GEN_INT (epilogue_size_needed),
			       LTU, 0, GET_MODE (count_exp), 1, label);
      if (expected_size == -1 || expected_size <= epilogue_size_needed)
	predict_jump (REG_BR_PROB_BASE * 60 / 100);
	predict_jump (REG_BR_PROB_BASE * 20 / 100);
  if (dynamic_check != -1)
      rtx hot_label = gen_label_rtx ();
      jump_around_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
			       LEU, 0, GET_MODE (count_exp), 1, hot_label);
      predict_jump (REG_BR_PROB_BASE * 90 / 100);
      set_storage_via_libcall (dst, count_exp, val_exp, false);
      emit_jump (jump_around_label);
      emit_label (hot_label);

  /* Step 2: Alignment prologue.  */

  /* Do the expensive promotion once we have branched off the small
     blocks.  */
    promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
						   desired_align, align);
  gcc_assert (desired_align >= 1 && align >= 1);

  if (desired_align > align)
      /* Except for the first move in epilogue, we no longer know the
	 constant offset in aliasing info.  It doesn't seem worth the pain
	 to maintain it for the first move, so throw away the info early.  */
      dst = change_address (dst, BLKmode, destreg);
      expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
  if (label && size_needed == 1)
      emit_label (label);
      LABEL_NUSES (label) = 1;

  /* Step 3: Main loop.  */

      gcc_unreachable ();
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
				     count_exp, QImode, 1, expected_size);
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
				     count_exp, Pmode, 1, expected_size);
    case unrolled_loop:
      expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
				     count_exp, Pmode, 4, expected_size);
    case rep_prefix_8_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
    case rep_prefix_4_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
    case rep_prefix_1_byte:
      expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
  /* Properly adjust the offset of the dest memory for aliasing.  */
  if (GET_CODE (count_exp) == CONST_INT)
    dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
					(count / size_needed) * size_needed);
    dst = change_address (dst, BLKmode, destreg);

  /* Step 4: Epilogue to set the remaining bytes.  */

      /* When the main loop is done, COUNT_EXP might hold the original count,
	 while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
	 Epilogue code will actually set COUNT_EXP & EPILOGUE_SIZE_NEEDED
	 bytes.  Compensate if needed.  */
      if (size_needed < desired_align - align)
	  expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
			       GEN_INT (size_needed - 1), count_exp, 1,
	  size_needed = desired_align - align + 1;
	  if (tmp != count_exp)
	    emit_move_insn (count_exp, tmp);
      emit_label (label);
      LABEL_NUSES (label) = 1;
  if (count_exp != const0_rtx && epilogue_size_needed > 1)
      if (force_loopy_epilogue)
	expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
	expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
  if (jump_around_label)
    emit_label (jump_around_label);

/* Expand strlen.  */
int
ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
  rtx addr, scratch1, scratch2, scratch3, scratch4;

  /* The generic case of the strlen expander is long.  Avoid expanding it
     unless TARGET_INLINE_ALL_STRINGOPS.  */
  if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
      && !TARGET_INLINE_ALL_STRINGOPS
      && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
  addr = force_reg (Pmode, XEXP (src, 0));
  scratch1 = gen_reg_rtx (Pmode);

  if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
      /* Well, it seems that some optimizer does not combine a call like
	 foo (strlen (bar), strlen (bar));
	 when the move and the subtraction are done here.  It does calculate
	 the length just once when these instructions are done inside of
	 output_strlen_unroll().  But I think that since &bar[strlen (bar)]
	 is often used and I use one fewer register for the lifetime of
	 output_strlen_unroll() this is better.  */

      emit_move_insn (out, addr);

      ix86_expand_strlensi_unroll_1 (out, src, align);

      /* strlensi_unroll_1 returns the address of the zero at the end of
	 the string, like memchr(), so compute the length by subtracting
	 the start address.  */
	emit_insn (gen_subdi3 (out, out, addr));
	emit_insn (gen_subsi3 (out, out, addr));
      scratch2 = gen_reg_rtx (Pmode);
      scratch3 = gen_reg_rtx (Pmode);
      scratch4 = force_reg (Pmode, constm1_rtx);

      emit_move_insn (scratch3, addr);
      eoschar = force_reg (QImode, eoschar);

      src = replace_equiv_address_nv (src, scratch3);

      /* If .md starts supporting :P, this can be done in .md.  */
      unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
						 scratch4), UNSPEC_SCAS);
      emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
	emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
	emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
	emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
	emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
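
/* Illustrative arithmetic (not part of the compiler) behind the scas path
   above: with the count register starting at -1, repnz scasb scans len + 1
   bytes (including the terminator) and decrements once per byte, leaving
   the counter at -(len + 2); the not/add pair then recovers len.  */
#if 0
static long
length_from_count_reg (long count_after_scas)
{
  /* ~(-(len + 2)) - 1 == (len + 1) - 1 == len.  */
  return ~count_after_scas - 1;
}
#endif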
/* Expand the appropriate insns for doing strlen if not just doing
   repnz; scasb

   out = result, initialized with the start address
   align_rtx = alignment of the address.
   scratch = scratch register, initialized with the start address when
	not aligned, otherwise undefined

   This is just the body.  It needs the initializations mentioned above and
   some address computing at the end.  These things are done in i386.md.  */
static void
ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
  rtx align_2_label = NULL_RTX;
  rtx align_3_label = NULL_RTX;
  rtx align_4_label = gen_label_rtx ();
  rtx end_0_label = gen_label_rtx ();
  rtx tmpreg = gen_reg_rtx (SImode);
  rtx scratch = gen_reg_rtx (SImode);

  if (GET_CODE (align_rtx) == CONST_INT)
    align = INTVAL (align_rtx);

  /* Loop to check 1..3 bytes for null to get an aligned pointer.  */

  /* Is there a known alignment and is it less than 4?  */
      rtx scratch1 = gen_reg_rtx (Pmode);
      emit_move_insn (scratch1, out);
      /* Is there a known alignment and is it not 2?  */
	  align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
	  align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */

	  /* Leave just the two lower bits.  */
	  align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
				    NULL_RTX, 0, OPTAB_WIDEN);

	  emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
				   Pmode, 1, align_4_label);
	  emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
				   Pmode, 1, align_2_label);
	  emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
				   Pmode, 1, align_3_label);
	  /* Since the alignment is 2, we have to check 2 or 0 bytes;
	     check whether it is aligned to a 4-byte boundary.  */

	  align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
				    NULL_RTX, 0, OPTAB_WIDEN);

	  emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
				   Pmode, 1, align_4_label);
      mem = change_address (src, QImode, out);

      /* Now compare the bytes.  */

      /* Compare the first n unaligned bytes on a byte-by-byte basis.  */
      emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
			       QImode, 1, end_0_label);

      /* Increment the address.  */
	emit_insn (gen_adddi3 (out, out, const1_rtx));
	emit_insn (gen_addsi3 (out, out, const1_rtx));

      /* Not needed with an alignment of 2.  */
	  emit_label (align_2_label);

	  emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
	    emit_insn (gen_adddi3 (out, out, const1_rtx));
	    emit_insn (gen_addsi3 (out, out, const1_rtx));
	  emit_label (align_3_label);

      emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
	emit_insn (gen_adddi3 (out, out, const1_rtx));
	emit_insn (gen_addsi3 (out, out, const1_rtx));

  /* Generate the loop to check 4 bytes at a time.  It is not a good idea
     to align this loop: it only enlarges programs and does not help to
     speed them up.  */
  emit_label (align_4_label);

  mem = change_address (src, SImode, out);
  emit_move_insn (scratch, mem);
    emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
    emit_insn (gen_addsi3 (out, out, GEN_INT (4)));

  /* This formula yields a nonzero result iff one of the bytes is zero.
     This saves three branches inside the loop and many cycles.  */

  emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
  emit_insn (gen_one_cmplsi2 (scratch, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg,
			 gen_int_mode (0x80808080, SImode)));
  emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,

      rtx reg = gen_reg_rtx (SImode);
      rtx reg2 = gen_reg_rtx (Pmode);
      emit_move_insn (reg, tmpreg);
      emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));

      /* If zero is not in the first two bytes, move two bytes forward.  */
      emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
			      gen_rtx_IF_THEN_ELSE (SImode, tmp,
      /* Emit lea manually to avoid clobbering of flags.  */
      emit_insn (gen_rtx_SET (SImode, reg2,
			      gen_rtx_PLUS (Pmode, out, const2_rtx)));

      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, out,
			      gen_rtx_IF_THEN_ELSE (Pmode, tmp,

      rtx end_2_label = gen_label_rtx ();
      /* Is zero in the first two bytes?  */

      emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
				  gen_rtx_LABEL_REF (VOIDmode, end_2_label),
      tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
      JUMP_LABEL (tmp) = end_2_label;

      /* Not in the first two.  Move two bytes forward.  */
      emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
	emit_insn (gen_adddi3 (out, out, const2_rtx));
	emit_insn (gen_addsi3 (out, out, const2_rtx));

      emit_label (end_2_label);

  /* Avoid a branch in fixing the byte.  */
  tmpreg = gen_lowpart (QImode, tmpreg);
  emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
  cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
    emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
    emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));

  emit_label (end_0_label);
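
/* A standalone, illustrative check (not part of the compiler) of the
   zero-in-word trick emitted above: (x - 0x01010101) & ~x & 0x80808080
   is nonzero iff some byte of x is zero.  */
#if 0
#include <stdint.h>

static int
has_zero_byte (uint32_t x)
{
  return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
}
#endif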
void
ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
		  rtx callarg2 ATTRIBUTE_UNUSED,
		  rtx pop, int sibcall)
  rtx use = NULL, call;

  if (pop == const0_rtx)
  gcc_assert (!TARGET_64BIT || !pop);

  if (TARGET_MACHO && !TARGET_64BIT)
    if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
      fnaddr = machopic_indirect_call_target (fnaddr);
      /* Static functions and indirect calls don't need the pic register.  */
      if (! TARGET_64BIT && flag_pic
	  && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
	  && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
	use_reg (&use, pic_offset_table_rtx);

  if (TARGET_64BIT && INTVAL (callarg2) >= 0)
      rtx al = gen_rtx_REG (QImode, 0);
      emit_move_insn (al, callarg2);
      use_reg (&use, al);
  if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
      fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
      fnaddr = gen_rtx_MEM (QImode, fnaddr);
  if (sibcall && TARGET_64BIT
      && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
      addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
      fnaddr = gen_rtx_REG (Pmode, R11_REG);
      emit_move_insn (fnaddr, addr);
      fnaddr = gen_rtx_MEM (QImode, fnaddr);

  call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
    call = gen_rtx_SET (VOIDmode, retval, call);
      pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
      pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
      call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));

  call = emit_call_insn (call);
    CALL_INSN_FUNCTION_USAGE (call) = use;
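
/* Note on the varargs handling above: for a 64-bit call such as
   printf ("%f\n", 3.14), the psABI requires AL to hold an upper bound on
   the number of SSE registers used to pass arguments, which is why
   CALLARG2 is loaded into AL (register 0 in QImode) and recorded as used
   before the call.  */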
/* Clear stack slot assignments remembered from previous functions.
   This is called from INIT_EXPANDERS once before RTL is emitted for each
   function.  */

static struct machine_function *
ix86_init_machine_status (void)
  struct machine_function *f;

  f = ggc_alloc_cleared (sizeof (struct machine_function));
  f->use_fast_prologue_epilogue_nregs = -1;
  f->tls_descriptor_call_expanded_p = 0;

/* Return a MEM corresponding to a stack slot with mode MODE.
   Allocate a new slot if necessary.

   The RTL for a function can have several slots available: N is
   which slot to use.  */

rtx
assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
  struct stack_local_entry *s;

  gcc_assert (n < MAX_386_STACK_LOCALS);

  for (s = ix86_stack_locals; s; s = s->next)
    if (s->mode == mode && s->n == n)
      return copy_rtx (s->rtl);

  s = (struct stack_local_entry *)
    ggc_alloc (sizeof (struct stack_local_entry));
  s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);

  s->next = ix86_stack_locals;
  ix86_stack_locals = s;

/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx ix86_tls_symbol;
static rtx
ix86_tls_get_addr (void)
  if (!ix86_tls_symbol)
      ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
					    (TARGET_ANY_GNU_TLS
					     ? "___tls_get_addr"
					     : "__tls_get_addr");
  return ix86_tls_symbol;

/* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol.  */

static GTY(()) rtx ix86_tls_module_base_symbol;
static rtx
ix86_tls_module_base (void)
  if (!ix86_tls_module_base_symbol)
      ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
							"_TLS_MODULE_BASE_");
      SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
	|= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
  return ix86_tls_module_base_symbol;

/* Calculate the length of the memory address in the instruction
   encoding.  Does not include the one-byte modrm, opcode, or prefix.  */

int
memory_address_length (rtx addr)
  struct ix86_address parts;
  rtx base, index, disp;

  if (GET_CODE (addr) == PRE_DEC
      || GET_CODE (addr) == POST_INC
      || GET_CODE (addr) == PRE_MODIFY
      || GET_CODE (addr) == POST_MODIFY)
  ok = ix86_decompose_address (addr, &parts);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);
  index = parts.index;

     - esp as the base always wants an index,
     - ebp as the base always wants a displacement.  */

  /* Register Indirect.  */
  if (base && !index && !disp)
      /* esp (for its index) and ebp (for its displacement) need
	 the two-byte modrm form.  */
      if (addr == stack_pointer_rtx
	  || addr == arg_pointer_rtx
	  || addr == frame_pointer_rtx
	  || addr == hard_frame_pointer_rtx)
  /* Direct Addressing.  */
  else if (disp && !base && !index)
      /* Find the length of the displacement constant.  */
	  if (base && satisfies_constraint_K (disp))
      /* ebp always wants a displacement.  */
      else if (base == hard_frame_pointer_rtx)
      /* An index requires the two-byte modrm form...  */
	  /* ...like esp, which always wants an index.  */
	  || base == stack_pointer_rtx
	  || base == arg_pointer_rtx
	  || base == frame_pointer_rtx)
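
/* A grossly simplified, standalone model (not part of the compiler) of the
   32-bit encoding rules noted above; it ignores SIB scale and 64-bit
   details, and the parameter names are made up.  Returns the bytes needed
   after the one-byte modrm.  */
#if 0
static int
extra_address_bytes (int has_base, int has_index, int has_disp,
		     int disp_fits_8bit, int base_is_esp, int base_is_ebp)
{
  int len = 0;
  if (!has_base && !has_index)
    return 4;			/* direct addressing: disp32 only */
  if (has_index || base_is_esp)
    len += 1;			/* SIB byte: esp as base always wants an index */
  if (has_disp || base_is_ebp)
    len += disp_fits_8bit ? 1 : 4;	/* ebp as base always wants a displacement */
  return len;
}
#endif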
/* Compute the default value for the "length_immediate" attribute.  When
   SHORTFORM is set, expect that the insn has an 8-bit immediate
   alternative.  */
int
ix86_attr_length_immediate_default (rtx insn, int shortform)
  extract_insn_cached (insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (CONSTANT_P (recog_data.operand[i]))
	if (shortform && satisfies_constraint_K (recog_data.operand[i]))
	switch (get_attr_mode (insn))
	    /* Immediates for DImode instructions are encoded as 32-bit
	       sign-extended values.  */
	    fatal_insn ("unknown insn mode", insn);

/* Compute the default value for the "length_address" attribute.  */
int
ix86_attr_length_address_default (rtx insn)
  if (get_attr_type (insn) == TYPE_LEA)
      rtx set = PATTERN (insn);

      if (GET_CODE (set) == PARALLEL)
	set = XVECEXP (set, 0, 0);

      gcc_assert (GET_CODE (set) == SET);

      return memory_address_length (SET_SRC (set));
  extract_insn_cached (insn);
  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (GET_CODE (recog_data.operand[i]) == MEM)
	return memory_address_length (XEXP (recog_data.operand[i], 0));

/* Return the maximum number of instructions a CPU can issue.  */

static int
ix86_issue_rate (void)
    case PROCESSOR_PENTIUM:
    case PROCESSOR_PENTIUMPRO:
    case PROCESSOR_PENTIUM4:
    case PROCESSOR_ATHLON:
    case PROCESSOR_NOCONA:
    case PROCESSOR_GENERIC32:
    case PROCESSOR_GENERIC64:
    case PROCESSOR_CORE2:

/* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
   by DEP_INSN and nothing else set by DEP_INSN.  */

static int
ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
  /* Simplify the test for uninteresting insns.  */
  if (insn_type != TYPE_SETCC
      && insn_type != TYPE_ICMOV
      && insn_type != TYPE_FCMOV
      && insn_type != TYPE_IBR)
  if ((set = single_set (dep_insn)) != 0)
      set = SET_DEST (set);
  else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
	   && XVECLEN (PATTERN (dep_insn), 0) == 2
	   && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
	   && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
      set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
      set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
  if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)

  /* This test is true if the dependent insn reads the flags but
     not any other potentially set register.  */
  if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
  if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))

/* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
   address with operands set by DEP_INSN.  */

static int
ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
  if (insn_type == TYPE_LEA
      addr = PATTERN (insn);

      if (GET_CODE (addr) == PARALLEL)
	addr = XVECEXP (addr, 0, 0);

      gcc_assert (GET_CODE (addr) == SET);

      addr = SET_SRC (addr);
      extract_insn_cached (insn);
      for (i = recog_data.n_operands - 1; i >= 0; --i)
	if (GET_CODE (recog_data.operand[i]) == MEM)
	    addr = XEXP (recog_data.operand[i], 0);
    return modified_in_p (addr, dep_insn);

static int
ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
  enum attr_type insn_type, dep_insn_type;
  enum attr_memory memory;
  int dep_insn_code_number;

  /* Anti and output dependencies have zero cost on all CPUs.  */
  if (REG_NOTE_KIND (link) != 0)
  dep_insn_code_number = recog_memoized (dep_insn);

  /* If we can't recognize the insns, we can't really do anything.  */
  if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
  insn_type = get_attr_type (insn);
  dep_insn_type = get_attr_type (dep_insn);

    case PROCESSOR_PENTIUM:
      /* Address Generation Interlock adds a cycle of latency.  */
      if (ix86_agi_dependent (insn, dep_insn, insn_type))
      /* ??? Compares pair with jump/setcc.  */
      if (ix86_flags_dependent (insn, dep_insn, insn_type))
      /* Floating point stores require the value to be ready one cycle
	 earlier.  */
      if (insn_type == TYPE_FMOV
	  && get_attr_memory (insn) == MEMORY_STORE
	  && !ix86_agi_dependent (insn, dep_insn, insn_type))
    case PROCESSOR_PENTIUMPRO:
      memory = get_attr_memory (insn);

      /* INT->FP conversion is expensive.  */
      if (get_attr_fp_int_src (dep_insn))
      /* There is one cycle of extra latency between an FP op and a store.  */
      if (insn_type == TYPE_FMOV
	  && (set = single_set (dep_insn)) != NULL_RTX
	  && (set2 = single_set (insn)) != NULL_RTX
	  && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
	  && GET_CODE (SET_DEST (set2)) == MEM)
      /* Show the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction when
	 the previous instruction is not needed to compute the address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
	  && !ix86_agi_dependent (insn, dep_insn, insn_type))
	  /* Claim moves to take one cycle, as the core can issue one load
	     at a time and the next load can start a cycle later.  */
	  if (dep_insn_type == TYPE_IMOV
	      || dep_insn_type == TYPE_FMOV)
      memory = get_attr_memory (insn);

      /* The esp dependency is resolved before the instruction is really
	 finished.  */
      if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
	  && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
      /* INT->FP conversion is expensive.  */
      if (get_attr_fp_int_src (dep_insn))
      /* Show the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction when
	 the previous instruction is not needed to compute the address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
	  && !ix86_agi_dependent (insn, dep_insn, insn_type))
	  /* Claim moves to take one cycle, as the core can issue one load
	     at a time and the next load can start a cycle later.  */
	  if (dep_insn_type == TYPE_IMOV
	      || dep_insn_type == TYPE_FMOV)
    case PROCESSOR_ATHLON:
    case PROCESSOR_GENERIC32:
    case PROCESSOR_GENERIC64:
      memory = get_attr_memory (insn);

      /* Show the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction when
	 the previous instruction is not needed to compute the address.  */
      if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
	  && !ix86_agi_dependent (insn, dep_insn, insn_type))
	  enum attr_unit unit = get_attr_unit (insn);

	  /* Because of the difference between the lengths of the integer
	     and floating point unit pipeline preparation stages, the memory
	     operands for floating point are cheaper.

	     ??? For Athlon the difference is most probably 2.  */
	  if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
	    loadcost = TARGET_ATHLON ? 2 : 0;

	  if (cost >= loadcost)

/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.  */

static int
ia32_multipass_dfa_lookahead (void)
  if (ix86_tune == PROCESSOR_PENTIUM)
  if (ix86_tune == PROCESSOR_PENTIUMPRO
      || ix86_tune == PROCESSOR_K6)

/* Compute the alignment given to a constant that is being placed in memory.
   EXP is the constant and ALIGN is the alignment that the object would
   ordinarily have.
   The value of this function is used instead of that alignment to align
   the object.  */

int
ix86_constant_alignment (tree exp, int align)
  if (TREE_CODE (exp) == REAL_CST)
      if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
      else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
  else if (!optimize_size && TREE_CODE (exp) == STRING_CST
	   && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
    return BITS_PER_WORD;

/* Compute the alignment for a static variable.
   TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this function is used
   instead of that alignment to align the object.  */

int
ix86_data_alignment (tree type, int align)
  int max_align = optimize_size ? BITS_PER_WORD : 256;

  if (AGGREGATE_TYPE_P (type)
      && TYPE_SIZE (type)
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
	  || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
      && align < max_align)
  /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  */
      if (AGGREGATE_TYPE_P (type)
	  && TYPE_SIZE (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
	      || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
  if (TREE_CODE (type) == ARRAY_TYPE)
      if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
      if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
  else if (TREE_CODE (type) == COMPLEX_TYPE)
      if (TYPE_MODE (type) == DCmode && align < 64)
      if (TYPE_MODE (type) == XCmode && align < 128)
  else if ((TREE_CODE (type) == RECORD_TYPE
	    || TREE_CODE (type) == UNION_TYPE
	    || TREE_CODE (type) == QUAL_UNION_TYPE)
	   && TYPE_FIELDS (type))
      if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
      if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
  else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
	   || TREE_CODE (type) == INTEGER_TYPE)
      if (TYPE_MODE (type) == DFmode && align < 64)
      if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)

/* Compute the alignment for a local variable.
   TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this macro is used
   instead of that alignment to align the object.  */

int
ix86_local_alignment (tree type, int align)
  /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  */
      if (AGGREGATE_TYPE_P (type)
	  && TYPE_SIZE (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
	      || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
  if (TREE_CODE (type) == ARRAY_TYPE)
      if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
      if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
  else if (TREE_CODE (type) == COMPLEX_TYPE)
      if (TYPE_MODE (type) == DCmode && align < 64)
      if (TYPE_MODE (type) == XCmode && align < 128)
  else if ((TREE_CODE (type) == RECORD_TYPE
	    || TREE_CODE (type) == UNION_TYPE
	    || TREE_CODE (type) == QUAL_UNION_TYPE)
	   && TYPE_FIELDS (type))
      if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
      if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
  else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
	   || TREE_CODE (type) == INTEGER_TYPE)
      if (TYPE_MODE (type) == DFmode && align < 64)
      if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */
void
x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
      /* Compute offset from the end of the jmp to the target function.  */
      rtx disp = expand_binop (SImode, sub_optab, fnaddr,
			       plus_constant (tramp, 10),
			       NULL_RTX, 1, OPTAB_DIRECT);
      emit_move_insn (gen_rtx_MEM (QImode, tramp),
		      gen_int_mode (0xb9, QImode));
      emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
      emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
		      gen_int_mode (0xe9, QImode));
      emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
      /* Try to load the address using the shorter movl instead of movabs.
	 We may want to support movq for kernel mode, but the kernel does
	 not use trampolines at the moment.  */
      if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
	  fnaddr = copy_to_mode_reg (DImode, fnaddr);
	  emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
			  gen_int_mode (0xbb41, HImode));
	  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
			  gen_lowpart (SImode, fnaddr));
	  emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
			  gen_int_mode (0xbb49, HImode));
	  emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
      /* Load the static chain using movabs to r10.  */
      emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
		      gen_int_mode (0xba49, HImode));
      emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
      /* Jump to r11.  */
      emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
		      gen_int_mode (0xff49, HImode));
      emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset + 2)),
		      gen_int_mode (0xe3, QImode));
  gcc_assert (offset <= TRAMPOLINE_SIZE);

#ifdef ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
#endif
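
/* A standalone, illustrative sketch (not part of the compiler) of the
   10-byte IA-32 trampoline built above: `mov ecx, cxt' followed by a
   relative `jmp' to FNADDR.  */
#if 0
#include <stdint.h>
#include <string.h>

static void
sketch_tramp (uint8_t *tramp, uint32_t fnaddr, uint32_t cxt)
{
  /* The displacement is relative to the end of the 5-byte jmp at offset 5.  */
  uint32_t disp = fnaddr - ((uint32_t) (uintptr_t) tramp + 10);
  tramp[0] = 0xb9;			/* mov ecx, imm32 */
  memcpy (tramp + 1, &cxt, 4);
  tramp[5] = 0xe9;			/* jmp rel32 */
  memcpy (tramp + 6, &disp, 4);
}
#endif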
15089 /* Codes for all the SSE/MMX builtins. */
15092 IX86_BUILTIN_ADDPS,
15093 IX86_BUILTIN_ADDSS,
15094 IX86_BUILTIN_DIVPS,
15095 IX86_BUILTIN_DIVSS,
15096 IX86_BUILTIN_MULPS,
15097 IX86_BUILTIN_MULSS,
15098 IX86_BUILTIN_SUBPS,
15099 IX86_BUILTIN_SUBSS,
15101 IX86_BUILTIN_CMPEQPS,
15102 IX86_BUILTIN_CMPLTPS,
15103 IX86_BUILTIN_CMPLEPS,
15104 IX86_BUILTIN_CMPGTPS,
15105 IX86_BUILTIN_CMPGEPS,
15106 IX86_BUILTIN_CMPNEQPS,
15107 IX86_BUILTIN_CMPNLTPS,
15108 IX86_BUILTIN_CMPNLEPS,
15109 IX86_BUILTIN_CMPNGTPS,
15110 IX86_BUILTIN_CMPNGEPS,
15111 IX86_BUILTIN_CMPORDPS,
15112 IX86_BUILTIN_CMPUNORDPS,
15113 IX86_BUILTIN_CMPEQSS,
15114 IX86_BUILTIN_CMPLTSS,
15115 IX86_BUILTIN_CMPLESS,
15116 IX86_BUILTIN_CMPNEQSS,
15117 IX86_BUILTIN_CMPNLTSS,
15118 IX86_BUILTIN_CMPNLESS,
15119 IX86_BUILTIN_CMPNGTSS,
15120 IX86_BUILTIN_CMPNGESS,
15121 IX86_BUILTIN_CMPORDSS,
15122 IX86_BUILTIN_CMPUNORDSS,
15124 IX86_BUILTIN_COMIEQSS,
15125 IX86_BUILTIN_COMILTSS,
15126 IX86_BUILTIN_COMILESS,
15127 IX86_BUILTIN_COMIGTSS,
15128 IX86_BUILTIN_COMIGESS,
15129 IX86_BUILTIN_COMINEQSS,
15130 IX86_BUILTIN_UCOMIEQSS,
15131 IX86_BUILTIN_UCOMILTSS,
15132 IX86_BUILTIN_UCOMILESS,
15133 IX86_BUILTIN_UCOMIGTSS,
15134 IX86_BUILTIN_UCOMIGESS,
15135 IX86_BUILTIN_UCOMINEQSS,
15137 IX86_BUILTIN_CVTPI2PS,
15138 IX86_BUILTIN_CVTPS2PI,
15139 IX86_BUILTIN_CVTSI2SS,
15140 IX86_BUILTIN_CVTSI642SS,
15141 IX86_BUILTIN_CVTSS2SI,
15142 IX86_BUILTIN_CVTSS2SI64,
15143 IX86_BUILTIN_CVTTPS2PI,
15144 IX86_BUILTIN_CVTTSS2SI,
15145 IX86_BUILTIN_CVTTSS2SI64,
15147 IX86_BUILTIN_MAXPS,
15148 IX86_BUILTIN_MAXSS,
15149 IX86_BUILTIN_MINPS,
15150 IX86_BUILTIN_MINSS,
15152 IX86_BUILTIN_LOADUPS,
15153 IX86_BUILTIN_STOREUPS,
15154 IX86_BUILTIN_MOVSS,
15156 IX86_BUILTIN_MOVHLPS,
15157 IX86_BUILTIN_MOVLHPS,
15158 IX86_BUILTIN_LOADHPS,
15159 IX86_BUILTIN_LOADLPS,
15160 IX86_BUILTIN_STOREHPS,
15161 IX86_BUILTIN_STORELPS,
15163 IX86_BUILTIN_MASKMOVQ,
15164 IX86_BUILTIN_MOVMSKPS,
15165 IX86_BUILTIN_PMOVMSKB,
15167 IX86_BUILTIN_MOVNTPS,
15168 IX86_BUILTIN_MOVNTQ,
15170 IX86_BUILTIN_LOADDQU,
15171 IX86_BUILTIN_STOREDQU,
15173 IX86_BUILTIN_PACKSSWB,
15174 IX86_BUILTIN_PACKSSDW,
15175 IX86_BUILTIN_PACKUSWB,
15177 IX86_BUILTIN_PADDB,
15178 IX86_BUILTIN_PADDW,
15179 IX86_BUILTIN_PADDD,
15180 IX86_BUILTIN_PADDQ,
15181 IX86_BUILTIN_PADDSB,
15182 IX86_BUILTIN_PADDSW,
15183 IX86_BUILTIN_PADDUSB,
15184 IX86_BUILTIN_PADDUSW,
15185 IX86_BUILTIN_PSUBB,
15186 IX86_BUILTIN_PSUBW,
15187 IX86_BUILTIN_PSUBD,
15188 IX86_BUILTIN_PSUBQ,
15189 IX86_BUILTIN_PSUBSB,
15190 IX86_BUILTIN_PSUBSW,
15191 IX86_BUILTIN_PSUBUSB,
15192 IX86_BUILTIN_PSUBUSW,
15195 IX86_BUILTIN_PANDN,
15199 IX86_BUILTIN_PAVGB,
15200 IX86_BUILTIN_PAVGW,
15202 IX86_BUILTIN_PCMPEQB,
15203 IX86_BUILTIN_PCMPEQW,
15204 IX86_BUILTIN_PCMPEQD,
15205 IX86_BUILTIN_PCMPGTB,
15206 IX86_BUILTIN_PCMPGTW,
15207 IX86_BUILTIN_PCMPGTD,
15209 IX86_BUILTIN_PMADDWD,
15211 IX86_BUILTIN_PMAXSW,
15212 IX86_BUILTIN_PMAXUB,
15213 IX86_BUILTIN_PMINSW,
15214 IX86_BUILTIN_PMINUB,
15216 IX86_BUILTIN_PMULHUW,
15217 IX86_BUILTIN_PMULHW,
15218 IX86_BUILTIN_PMULLW,
15220 IX86_BUILTIN_PSADBW,
15221 IX86_BUILTIN_PSHUFW,
15223 IX86_BUILTIN_PSLLW,
15224 IX86_BUILTIN_PSLLD,
15225 IX86_BUILTIN_PSLLQ,
15226 IX86_BUILTIN_PSRAW,
15227 IX86_BUILTIN_PSRAD,
15228 IX86_BUILTIN_PSRLW,
15229 IX86_BUILTIN_PSRLD,
15230 IX86_BUILTIN_PSRLQ,
15231 IX86_BUILTIN_PSLLWI,
15232 IX86_BUILTIN_PSLLDI,
15233 IX86_BUILTIN_PSLLQI,
15234 IX86_BUILTIN_PSRAWI,
15235 IX86_BUILTIN_PSRADI,
15236 IX86_BUILTIN_PSRLWI,
15237 IX86_BUILTIN_PSRLDI,
15238 IX86_BUILTIN_PSRLQI,
15240 IX86_BUILTIN_PUNPCKHBW,
15241 IX86_BUILTIN_PUNPCKHWD,
15242 IX86_BUILTIN_PUNPCKHDQ,
15243 IX86_BUILTIN_PUNPCKLBW,
15244 IX86_BUILTIN_PUNPCKLWD,
15245 IX86_BUILTIN_PUNPCKLDQ,
15247 IX86_BUILTIN_SHUFPS,
15249 IX86_BUILTIN_RCPPS,
15250 IX86_BUILTIN_RCPSS,
15251 IX86_BUILTIN_RSQRTPS,
15252 IX86_BUILTIN_RSQRTSS,
15253 IX86_BUILTIN_SQRTPS,
15254 IX86_BUILTIN_SQRTSS,
15256 IX86_BUILTIN_UNPCKHPS,
15257 IX86_BUILTIN_UNPCKLPS,
15259 IX86_BUILTIN_ANDPS,
15260 IX86_BUILTIN_ANDNPS,
15262 IX86_BUILTIN_XORPS,
15265 IX86_BUILTIN_LDMXCSR,
15266 IX86_BUILTIN_STMXCSR,
15267 IX86_BUILTIN_SFENCE,
15269 /* 3DNow! Original */
15270 IX86_BUILTIN_FEMMS,
15271 IX86_BUILTIN_PAVGUSB,
15272 IX86_BUILTIN_PF2ID,
15273 IX86_BUILTIN_PFACC,
15274 IX86_BUILTIN_PFADD,
15275 IX86_BUILTIN_PFCMPEQ,
15276 IX86_BUILTIN_PFCMPGE,
15277 IX86_BUILTIN_PFCMPGT,
15278 IX86_BUILTIN_PFMAX,
15279 IX86_BUILTIN_PFMIN,
15280 IX86_BUILTIN_PFMUL,
15281 IX86_BUILTIN_PFRCP,
15282 IX86_BUILTIN_PFRCPIT1,
15283 IX86_BUILTIN_PFRCPIT2,
15284 IX86_BUILTIN_PFRSQIT1,
15285 IX86_BUILTIN_PFRSQRT,
15286 IX86_BUILTIN_PFSUB,
15287 IX86_BUILTIN_PFSUBR,
15288 IX86_BUILTIN_PI2FD,
15289 IX86_BUILTIN_PMULHRW,
15291 /* 3DNow! Athlon Extensions */
15292 IX86_BUILTIN_PF2IW,
15293 IX86_BUILTIN_PFNACC,
15294 IX86_BUILTIN_PFPNACC,
15295 IX86_BUILTIN_PI2FW,
15296 IX86_BUILTIN_PSWAPDSI,
15297 IX86_BUILTIN_PSWAPDSF,

  /* SSE2 */
  IX86_BUILTIN_ADDPD,
  IX86_BUILTIN_ADDSD,
  IX86_BUILTIN_DIVPD,
  IX86_BUILTIN_DIVSD,
  IX86_BUILTIN_MULPD,
  IX86_BUILTIN_MULSD,
  IX86_BUILTIN_SUBPD,
  IX86_BUILTIN_SUBSD,

  IX86_BUILTIN_CMPEQPD,
  IX86_BUILTIN_CMPLTPD,
  IX86_BUILTIN_CMPLEPD,
  IX86_BUILTIN_CMPGTPD,
  IX86_BUILTIN_CMPGEPD,
  IX86_BUILTIN_CMPNEQPD,
  IX86_BUILTIN_CMPNLTPD,
  IX86_BUILTIN_CMPNLEPD,
  IX86_BUILTIN_CMPNGTPD,
  IX86_BUILTIN_CMPNGEPD,
  IX86_BUILTIN_CMPORDPD,
  IX86_BUILTIN_CMPUNORDPD,
  IX86_BUILTIN_CMPNEPD,
  IX86_BUILTIN_CMPEQSD,
  IX86_BUILTIN_CMPLTSD,
  IX86_BUILTIN_CMPLESD,
  IX86_BUILTIN_CMPNEQSD,
  IX86_BUILTIN_CMPNLTSD,
  IX86_BUILTIN_CMPNLESD,
  IX86_BUILTIN_CMPORDSD,
  IX86_BUILTIN_CMPUNORDSD,
  IX86_BUILTIN_CMPNESD,

  IX86_BUILTIN_COMIEQSD,
  IX86_BUILTIN_COMILTSD,
  IX86_BUILTIN_COMILESD,
  IX86_BUILTIN_COMIGTSD,
  IX86_BUILTIN_COMIGESD,
  IX86_BUILTIN_COMINEQSD,
  IX86_BUILTIN_UCOMIEQSD,
  IX86_BUILTIN_UCOMILTSD,
  IX86_BUILTIN_UCOMILESD,
  IX86_BUILTIN_UCOMIGTSD,
  IX86_BUILTIN_UCOMIGESD,
  IX86_BUILTIN_UCOMINEQSD,

  IX86_BUILTIN_MAXPD,
  IX86_BUILTIN_MAXSD,
  IX86_BUILTIN_MINPD,
  IX86_BUILTIN_MINSD,

  IX86_BUILTIN_ANDPD,
  IX86_BUILTIN_ANDNPD,
  IX86_BUILTIN_ORPD,
  IX86_BUILTIN_XORPD,

  IX86_BUILTIN_SQRTPD,
  IX86_BUILTIN_SQRTSD,

  IX86_BUILTIN_UNPCKHPD,
  IX86_BUILTIN_UNPCKLPD,

  IX86_BUILTIN_SHUFPD,

  IX86_BUILTIN_LOADUPD,
  IX86_BUILTIN_STOREUPD,
  IX86_BUILTIN_MOVSD,

  IX86_BUILTIN_LOADHPD,
  IX86_BUILTIN_LOADLPD,

  IX86_BUILTIN_CVTDQ2PD,
  IX86_BUILTIN_CVTDQ2PS,

  IX86_BUILTIN_CVTPD2DQ,
  IX86_BUILTIN_CVTPD2PI,
  IX86_BUILTIN_CVTPD2PS,
  IX86_BUILTIN_CVTTPD2DQ,
  IX86_BUILTIN_CVTTPD2PI,

  IX86_BUILTIN_CVTPI2PD,
  IX86_BUILTIN_CVTSI2SD,
  IX86_BUILTIN_CVTSI642SD,

  IX86_BUILTIN_CVTSD2SI,
  IX86_BUILTIN_CVTSD2SI64,
  IX86_BUILTIN_CVTSD2SS,
  IX86_BUILTIN_CVTSS2SD,
  IX86_BUILTIN_CVTTSD2SI,
  IX86_BUILTIN_CVTTSD2SI64,

  IX86_BUILTIN_CVTPS2DQ,
  IX86_BUILTIN_CVTPS2PD,
  IX86_BUILTIN_CVTTPS2DQ,

  IX86_BUILTIN_MOVNTI,
  IX86_BUILTIN_MOVNTPD,
  IX86_BUILTIN_MOVNTDQ,

  /* SSE2 MMX */
  IX86_BUILTIN_MASKMOVDQU,
  IX86_BUILTIN_MOVMSKPD,
  IX86_BUILTIN_PMOVMSKB128,

  IX86_BUILTIN_PACKSSWB128,
  IX86_BUILTIN_PACKSSDW128,
  IX86_BUILTIN_PACKUSWB128,

  IX86_BUILTIN_PADDB128,
  IX86_BUILTIN_PADDW128,
  IX86_BUILTIN_PADDD128,
  IX86_BUILTIN_PADDQ128,
  IX86_BUILTIN_PADDSB128,
  IX86_BUILTIN_PADDSW128,
  IX86_BUILTIN_PADDUSB128,
  IX86_BUILTIN_PADDUSW128,
  IX86_BUILTIN_PSUBB128,
  IX86_BUILTIN_PSUBW128,
  IX86_BUILTIN_PSUBD128,
  IX86_BUILTIN_PSUBQ128,
  IX86_BUILTIN_PSUBSB128,
  IX86_BUILTIN_PSUBSW128,
  IX86_BUILTIN_PSUBUSB128,
  IX86_BUILTIN_PSUBUSW128,

  IX86_BUILTIN_PAND128,
  IX86_BUILTIN_PANDN128,
  IX86_BUILTIN_POR128,
  IX86_BUILTIN_PXOR128,

  IX86_BUILTIN_PAVGB128,
  IX86_BUILTIN_PAVGW128,

  IX86_BUILTIN_PCMPEQB128,
  IX86_BUILTIN_PCMPEQW128,
  IX86_BUILTIN_PCMPEQD128,
  IX86_BUILTIN_PCMPGTB128,
  IX86_BUILTIN_PCMPGTW128,
  IX86_BUILTIN_PCMPGTD128,

  IX86_BUILTIN_PMADDWD128,

  IX86_BUILTIN_PMAXSW128,
  IX86_BUILTIN_PMAXUB128,
  IX86_BUILTIN_PMINSW128,
  IX86_BUILTIN_PMINUB128,

  IX86_BUILTIN_PMULUDQ,
  IX86_BUILTIN_PMULUDQ128,
  IX86_BUILTIN_PMULHUW128,
  IX86_BUILTIN_PMULHW128,
  IX86_BUILTIN_PMULLW128,

  IX86_BUILTIN_PSADBW128,
  IX86_BUILTIN_PSHUFHW,
  IX86_BUILTIN_PSHUFLW,
  IX86_BUILTIN_PSHUFD,

  IX86_BUILTIN_PSLLW128,
  IX86_BUILTIN_PSLLD128,
  IX86_BUILTIN_PSLLQ128,
  IX86_BUILTIN_PSRAW128,
  IX86_BUILTIN_PSRAD128,
  IX86_BUILTIN_PSRLW128,
  IX86_BUILTIN_PSRLD128,
  IX86_BUILTIN_PSRLQ128,
  IX86_BUILTIN_PSLLDQI128,
  IX86_BUILTIN_PSLLWI128,
  IX86_BUILTIN_PSLLDI128,
  IX86_BUILTIN_PSLLQI128,
  IX86_BUILTIN_PSRAWI128,
  IX86_BUILTIN_PSRADI128,
  IX86_BUILTIN_PSRLDQI128,
  IX86_BUILTIN_PSRLWI128,
  IX86_BUILTIN_PSRLDI128,
  IX86_BUILTIN_PSRLQI128,

  IX86_BUILTIN_PUNPCKHBW128,
  IX86_BUILTIN_PUNPCKHWD128,
  IX86_BUILTIN_PUNPCKHDQ128,
  IX86_BUILTIN_PUNPCKHQDQ128,
  IX86_BUILTIN_PUNPCKLBW128,
  IX86_BUILTIN_PUNPCKLWD128,
  IX86_BUILTIN_PUNPCKLDQ128,
  IX86_BUILTIN_PUNPCKLQDQ128,

  IX86_BUILTIN_CLFLUSH,
  IX86_BUILTIN_MFENCE,
  IX86_BUILTIN_LFENCE,

  /* Prescott New Instructions.  */
  IX86_BUILTIN_ADDSUBPS,
  IX86_BUILTIN_HADDPS,
  IX86_BUILTIN_HSUBPS,
  IX86_BUILTIN_MOVSHDUP,
  IX86_BUILTIN_MOVSLDUP,
  IX86_BUILTIN_ADDSUBPD,
  IX86_BUILTIN_HADDPD,
  IX86_BUILTIN_HSUBPD,
  IX86_BUILTIN_LDDQU,

  IX86_BUILTIN_MONITOR,
  IX86_BUILTIN_MWAIT,

  /* SSSE3 */
  IX86_BUILTIN_PHADDW,
  IX86_BUILTIN_PHADDD,
  IX86_BUILTIN_PHADDSW,
  IX86_BUILTIN_PHSUBW,
  IX86_BUILTIN_PHSUBD,
  IX86_BUILTIN_PHSUBSW,
  IX86_BUILTIN_PMADDUBSW,
  IX86_BUILTIN_PMULHRSW,
  IX86_BUILTIN_PSHUFB,
  IX86_BUILTIN_PSIGNB,
  IX86_BUILTIN_PSIGNW,
  IX86_BUILTIN_PSIGND,
  IX86_BUILTIN_PALIGNR,
  IX86_BUILTIN_PABSB,
  IX86_BUILTIN_PABSW,
  IX86_BUILTIN_PABSD,

  IX86_BUILTIN_PHADDW128,
  IX86_BUILTIN_PHADDD128,
  IX86_BUILTIN_PHADDSW128,
  IX86_BUILTIN_PHSUBW128,
  IX86_BUILTIN_PHSUBD128,
  IX86_BUILTIN_PHSUBSW128,
  IX86_BUILTIN_PMADDUBSW128,
  IX86_BUILTIN_PMULHRSW128,
  IX86_BUILTIN_PSHUFB128,
  IX86_BUILTIN_PSIGNB128,
  IX86_BUILTIN_PSIGNW128,
  IX86_BUILTIN_PSIGND128,
  IX86_BUILTIN_PALIGNR128,
  IX86_BUILTIN_PABSB128,
  IX86_BUILTIN_PABSW128,
  IX86_BUILTIN_PABSD128,

  IX86_BUILTIN_VEC_INIT_V2SI,
  IX86_BUILTIN_VEC_INIT_V4HI,
  IX86_BUILTIN_VEC_INIT_V8QI,
  IX86_BUILTIN_VEC_EXT_V2DF,
  IX86_BUILTIN_VEC_EXT_V2DI,
  IX86_BUILTIN_VEC_EXT_V4SF,
  IX86_BUILTIN_VEC_EXT_V4SI,
  IX86_BUILTIN_VEC_EXT_V8HI,
  IX86_BUILTIN_VEC_EXT_V2SI,
  IX86_BUILTIN_VEC_EXT_V4HI,
  IX86_BUILTIN_VEC_SET_V8HI,
  IX86_BUILTIN_VEC_SET_V4HI,

  IX86_BUILTIN_MAX
};

/* Table for the ix86 builtin decls.  */
static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];

/* Add an ix86 target builtin function with CODE, NAME and TYPE.  Do so
 * only if the target_flags include one of MASK.  Stores the function decl
 * in the ix86_builtins array.
 * Returns the function decl, or NULL_TREE if the builtin was not added.  */

static tree
def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
{
  tree decl = NULL_TREE;

  if (mask & target_flags
      && (!(mask & MASK_64BIT) || TARGET_64BIT))
    {
      decl = add_builtin_function (name, type, code, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      ix86_builtins[(int) code] = decl;
    }

  return decl;
}
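
/* Illustrative sketch, not part of the original source: a registration
   gated on both an ISA mask and 64-bit mode.  With -msse2 on a 64-bit
   target, target_flags contains MASK_SSE2 and TARGET_64BIT holds, so a
   call such as

     def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64",
                  int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);

   creates the decl and records it in ix86_builtins; on a 32-bit target
   the MASK_64BIT test above makes it return NULL_TREE instead.  */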

/* Like def_builtin, but also marks the function decl "const".  */

static tree
def_builtin_const (int mask, const char *name, tree type,
                   enum ix86_builtins code)
{
  tree decl = def_builtin (mask, name, type, code);
  if (decl)
    TREE_READONLY (decl) = 1;
  return decl;
}
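
/* Why "const" matters (explanatory note, not in the original source):
   TREE_READONLY on the decl tells the middle end the builtin reads no
   global state and has no side effects, so identical calls may be
   combined.  For instance, in user code

     __m128 a = __builtin_ia32_sqrtps (x);
     __m128 b = __builtin_ia32_sqrtps (x);

   the second call may be CSEd into the first.  That would be unsafe for
   something like __builtin_ia32_ldmxcsr, which changes the FP control
   state; such builtins are registered with plain def_builtin below.  */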

/* Bits for builtin_description.flag.  */

/* Set when we don't support the comparison natively, and should
   swap_comparison in order to support it.  */
#define BUILTIN_DESC_SWAP_OPERANDS 1
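
/* Example of the flag in use: SSE has no native packed "greater than"
   compare, so __builtin_ia32_cmpgtps appears in bdesc_2arg below with
   comparison code LT and BUILTIN_DESC_SWAP_OPERANDS set; the expander
   swaps the two operands, computing a > b as b < a.  */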

struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum ix86_builtins code;
  const enum rtx_code comparison;
  const unsigned int flag;
};

static const struct builtin_description bdesc_comi[] =
{
  { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
  { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
  { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
  { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
  { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
  { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
  { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
  { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
  { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
  { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
  { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
  { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
  { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
  { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
};
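
/* Usage sketch (illustrative, not in the original source): each row
   above pairs a comi/ucomi insn with the rtx_code used to test the
   resulting flags.  Given two __m128 values a and b,

     int eq = __builtin_ia32_comieq (a, b);

   expands to comiss plus a flag test; the UN* and LTGT codes encode how
   the unordered (NaN) case shows up in EFLAGS after the compare.  */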

static const struct builtin_description bdesc_2arg[] =
{
  /* SSE */
  { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
  { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
  { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
  { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },

  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },

  { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
  { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },

  { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
  { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
  { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },

  { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },

  /* MMX */
  { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
  { MASK_SSE2, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
  { MASK_SSE2, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },

  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },

  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },

  /* Special.  */
  { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },

  { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
  { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },

  { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },

  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
  { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },

  /* SSE2 */
  { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
    BUILTIN_DESC_SWAP_OPERANDS },
  { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },

  { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },

  { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },

  /* SSE2 MMX */
  { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
  { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
  { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
  { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },

  { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
  { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },

  { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },

  { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
  { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
  { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },

  { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },

  { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },

  { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
  { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
  { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },

  { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
  { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
  { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },

  { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
  { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
  { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },

  /* SSE3 */
  { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 },

  /* SSSE3 */
  { MASK_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, 0, 0 }
};

static const struct builtin_description bdesc_1arg[] =
{
  { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },

  { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },

  { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
  { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
  { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
  { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },

  { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
  { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
  { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },

  { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
  { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },

  /* SSE3 */
  { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
  { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },

  /* SSSE3 */
  { MASK_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, 0, 0 },
  { MASK_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, 0, 0 },
  { MASK_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, 0, 0 },
  { MASK_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, 0, 0 },
};

static void
ix86_init_builtins (void)
{
  if (TARGET_MMX)
    ix86_init_mmx_sse_builtins ();
}

/* Set up all the MMX/SSE builtins.  This is not called if TARGET_MMX
   is zero.  Otherwise, if TARGET_SSE is not set, only expand the MMX
   builtins.  */

static void
ix86_init_mmx_sse_builtins (void)
{
  const struct builtin_description * d;
  size_t i;

  tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
  tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
  tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
  tree V2DI_type_node
    = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
  tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
  tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
  tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
  tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
  tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
  tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);

  tree pchar_type_node = build_pointer_type (char_type_node);
  tree pcchar_type_node = build_pointer_type (
                            build_type_variant (char_type_node, 1, 0));
  tree pfloat_type_node = build_pointer_type (float_type_node);
  tree pcfloat_type_node = build_pointer_type (
                             build_type_variant (float_type_node, 1, 0));
  tree pv2si_type_node = build_pointer_type (V2SI_type_node);
  tree pv2di_type_node = build_pointer_type (V2DI_type_node);
  tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);

  /* Comparisons.  */
  tree int_ftype_v4sf_v4sf
    = build_function_type_list (integer_type_node,
                                V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v4si_ftype_v4sf_v4sf
    = build_function_type_list (V4SI_type_node,
                                V4SF_type_node, V4SF_type_node, NULL_TREE);
  /* MMX/SSE/integer conversions.  */
  tree int_ftype_v4sf
    = build_function_type_list (integer_type_node,
                                V4SF_type_node, NULL_TREE);
  tree int64_ftype_v4sf
    = build_function_type_list (long_long_integer_type_node,
                                V4SF_type_node, NULL_TREE);
  tree int_ftype_v8qi
    = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_int
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, integer_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_int64
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, long_long_integer_type_node,
                                NULL_TREE);
  tree v4sf_ftype_v4sf_v2si
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, V2SI_type_node, NULL_TREE);

  /* Miscellaneous.  */
  tree v8qi_ftype_v4hi_v4hi
    = build_function_type_list (V8QI_type_node,
                                V4HI_type_node, V4HI_type_node, NULL_TREE);
  tree v4hi_ftype_v2si_v2si
    = build_function_type_list (V4HI_type_node,
                                V2SI_type_node, V2SI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_v4sf_int
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, V4SF_type_node,
                                integer_type_node, NULL_TREE);
  tree v2si_ftype_v4hi_v4hi
    = build_function_type_list (V2SI_type_node,
                                V4HI_type_node, V4HI_type_node, NULL_TREE);
  tree v4hi_ftype_v4hi_int
    = build_function_type_list (V4HI_type_node,
                                V4HI_type_node, integer_type_node, NULL_TREE);
  tree v4hi_ftype_v4hi_di
    = build_function_type_list (V4HI_type_node,
                                V4HI_type_node, long_long_unsigned_type_node,
                                NULL_TREE);
  tree v2si_ftype_v2si_di
    = build_function_type_list (V2SI_type_node,
                                V2SI_type_node, long_long_unsigned_type_node,
                                NULL_TREE);
  tree void_ftype_void
    = build_function_type (void_type_node, void_list_node);
  tree void_ftype_unsigned
    = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
  tree void_ftype_unsigned_unsigned
    = build_function_type_list (void_type_node, unsigned_type_node,
                                unsigned_type_node, NULL_TREE);
  tree void_ftype_pcvoid_unsigned_unsigned
    = build_function_type_list (void_type_node, const_ptr_type_node,
                                unsigned_type_node, unsigned_type_node,
                                NULL_TREE);
  tree unsigned_ftype_void
    = build_function_type (unsigned_type_node, void_list_node);
  tree v2si_ftype_v4sf
    = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
  /* Loads/stores.  */
  tree void_ftype_v8qi_v8qi_pchar
    = build_function_type_list (void_type_node,
                                V8QI_type_node, V8QI_type_node,
                                pchar_type_node, NULL_TREE);
  tree v4sf_ftype_pcfloat
    = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
  /* @@@ the type is bogus */
  tree v4sf_ftype_v4sf_pv2si
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, pv2si_type_node, NULL_TREE);
  tree void_ftype_pv2si_v4sf
    = build_function_type_list (void_type_node,
                                pv2si_type_node, V4SF_type_node, NULL_TREE);
  tree void_ftype_pfloat_v4sf
    = build_function_type_list (void_type_node,
                                pfloat_type_node, V4SF_type_node, NULL_TREE);
  tree void_ftype_pdi_di
    = build_function_type_list (void_type_node,
                                pdi_type_node, long_long_unsigned_type_node,
                                NULL_TREE);
  tree void_ftype_pv2di_v2di
    = build_function_type_list (void_type_node,
                                pv2di_type_node, V2DI_type_node, NULL_TREE);
  /* Normal vector unops.  */
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8qi_ftype_v8qi
    = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
  tree v4hi_ftype_v4hi
    = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);

  /* Normal vector binops.  */
  tree v4sf_ftype_v4sf_v4sf
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v8qi_ftype_v8qi_v8qi
    = build_function_type_list (V8QI_type_node,
                                V8QI_type_node, V8QI_type_node, NULL_TREE);
  tree v4hi_ftype_v4hi_v4hi
    = build_function_type_list (V4HI_type_node,
                                V4HI_type_node, V4HI_type_node, NULL_TREE);
  tree v2si_ftype_v2si_v2si
    = build_function_type_list (V2SI_type_node,
                                V2SI_type_node, V2SI_type_node, NULL_TREE);
  tree di_ftype_di_di
    = build_function_type_list (long_long_unsigned_type_node,
                                long_long_unsigned_type_node,
                                long_long_unsigned_type_node, NULL_TREE);

  tree di_ftype_di_di_int
    = build_function_type_list (long_long_unsigned_type_node,
                                long_long_unsigned_type_node,
                                long_long_unsigned_type_node,
                                integer_type_node, NULL_TREE);

  tree v2si_ftype_v2sf
    = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
  tree v2sf_ftype_v2si
    = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
  tree v2si_ftype_v2si
    = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
  tree v2sf_ftype_v2sf
    = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
  tree v2sf_ftype_v2sf_v2sf
    = build_function_type_list (V2SF_type_node,
                                V2SF_type_node, V2SF_type_node, NULL_TREE);
  tree v2si_ftype_v2sf_v2sf
    = build_function_type_list (V2SI_type_node,
                                V2SF_type_node, V2SF_type_node, NULL_TREE);
  tree pint_type_node = build_pointer_type (integer_type_node);
  tree pdouble_type_node = build_pointer_type (double_type_node);
  tree pcdouble_type_node = build_pointer_type (
                              build_type_variant (double_type_node, 1, 0));
  tree int_ftype_v2df_v2df
    = build_function_type_list (integer_type_node,
                                V2DF_type_node, V2DF_type_node, NULL_TREE);

  tree void_ftype_pcvoid
    = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
  tree v4sf_ftype_v4si
    = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
  tree v4si_ftype_v4sf
    = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v4si
    = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
  tree v4si_ftype_v2df
    = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
  tree v2si_ftype_v2df
    = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
  tree v4sf_ftype_v2df
    = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
  tree v2df_ftype_v2si
    = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
  tree v2df_ftype_v4sf
    = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
  tree int_ftype_v2df
    = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
  tree int64_ftype_v2df
    = build_function_type_list (long_long_integer_type_node,
                                V2DF_type_node, NULL_TREE);
  tree v2df_ftype_v2df_int
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, integer_type_node, NULL_TREE);
  tree v2df_ftype_v2df_int64
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, long_long_integer_type_node,
                                NULL_TREE);
  tree v4sf_ftype_v4sf_v2df
    = build_function_type_list (V4SF_type_node,
                                V4SF_type_node, V2DF_type_node, NULL_TREE);
  tree v2df_ftype_v2df_v4sf
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df_v2df_int
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, V2DF_type_node,
                                integer_type_node, NULL_TREE);

  tree v2df_ftype_v2df_pcdouble
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, pcdouble_type_node, NULL_TREE);
  tree void_ftype_pdouble_v2df
    = build_function_type_list (void_type_node,
                                pdouble_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pint_int
    = build_function_type_list (void_type_node,
                                pint_type_node, integer_type_node, NULL_TREE);
  tree void_ftype_v16qi_v16qi_pchar
    = build_function_type_list (void_type_node,
                                V16QI_type_node, V16QI_type_node,
                                pchar_type_node, NULL_TREE);
  tree v2df_ftype_pcdouble
    = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
  tree v2df_ftype_v2df_v2df
    = build_function_type_list (V2DF_type_node,
                                V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi_v16qi
    = build_function_type_list (V16QI_type_node,
                                V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v8hi
    = build_function_type_list (V8HI_type_node,
                                V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v4si_ftype_v4si_v4si
    = build_function_type_list (V4SI_type_node,
                                V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v2di_ftype_v2di_v2di
    = build_function_type_list (V2DI_type_node,
                                V2DI_type_node, V2DI_type_node, NULL_TREE);
  tree v2di_ftype_v2df_v2df
    = build_function_type_list (V2DI_type_node,
                                V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree v2di_ftype_v2di_int
    = build_function_type_list (V2DI_type_node,
                                V2DI_type_node, integer_type_node, NULL_TREE);
  tree v2di_ftype_v2di_v2di_int
    = build_function_type_list (V2DI_type_node, V2DI_type_node,
                                V2DI_type_node, integer_type_node, NULL_TREE);
  tree v4si_ftype_v4si_int
    = build_function_type_list (V4SI_type_node,
                                V4SI_type_node, integer_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_int
    = build_function_type_list (V8HI_type_node,
                                V8HI_type_node, integer_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v2di
    = build_function_type_list (V8HI_type_node,
                                V8HI_type_node, V2DI_type_node, NULL_TREE);
  tree v4si_ftype_v4si_v2di
    = build_function_type_list (V4SI_type_node,
                                V4SI_type_node, V2DI_type_node, NULL_TREE);
  tree v4si_ftype_v8hi_v8hi
    = build_function_type_list (V4SI_type_node,
                                V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree di_ftype_v8qi_v8qi
    = build_function_type_list (long_long_unsigned_type_node,
                                V8QI_type_node, V8QI_type_node, NULL_TREE);
  tree di_ftype_v2si_v2si
    = build_function_type_list (long_long_unsigned_type_node,
                                V2SI_type_node, V2SI_type_node, NULL_TREE);
  tree v2di_ftype_v16qi_v16qi
    = build_function_type_list (V2DI_type_node,
                                V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v2di_ftype_v4si_v4si
    = build_function_type_list (V2DI_type_node,
                                V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree int_ftype_v16qi
    = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
  tree v16qi_ftype_pcchar
    = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
  tree void_ftype_pchar_v16qi
    = build_function_type_list (void_type_node,
                                pchar_type_node, V16QI_type_node, NULL_TREE);

  tree float80_type;
  tree float128_type;

  /* The __float80 type.  */
  if (TYPE_MODE (long_double_type_node) == XFmode)
    (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                               "__float80");
  else
    {
      /* The __float80 type.  */
      float80_type = make_node (REAL_TYPE);
      TYPE_PRECISION (float80_type) = 80;
      layout_type (float80_type);
      (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
    }

  if (TARGET_64BIT)
    {
      float128_type = make_node (REAL_TYPE);
      TYPE_PRECISION (float128_type) = 128;
      layout_type (float128_type);
      (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
    }
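
/* Illustrative use of the types registered above (not in the original
   source): once registered, the front ends accept them as type names in
   user code, e.g.

     __float80 ext = 1.0;
     __float128 quad = 1.0;

   As reconstructed above, __float128 is registered only for 64-bit
   targets, so the second declaration assumes TARGET_64BIT.  */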

  /* Add all builtins that are more or less simple operations on two
     operands.  */
  for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      /* Use one of the operands; the target can have a different mode for
         mask-generating compares.  */
      enum machine_mode mode;
      tree type;

      if (d->name == 0)
        continue;
      mode = insn_data[d->icode].operand[1].mode;

      switch (mode)
        {
        case V16QImode:
          type = v16qi_ftype_v16qi_v16qi;
          break;
        case V8HImode:
          type = v8hi_ftype_v8hi_v8hi;
          break;
        case V4SImode:
          type = v4si_ftype_v4si_v4si;
          break;
        case V2DImode:
          type = v2di_ftype_v2di_v2di;
          break;
        case V2DFmode:
          type = v2df_ftype_v2df_v2df;
          break;
        case V4SFmode:
          type = v4sf_ftype_v4sf_v4sf;
          break;
        case V8QImode:
          type = v8qi_ftype_v8qi_v8qi;
          break;
        case V4HImode:
          type = v4hi_ftype_v4hi_v4hi;
          break;
        case V2SImode:
          type = v2si_ftype_v2si_v2si;
          break;
        case DImode:
          type = di_ftype_di_di;
          break;

        default:
          gcc_unreachable ();
        }

      /* Override for comparisons.  */
      if (d->icode == CODE_FOR_sse_maskcmpv4sf3
          || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
        type = v4si_ftype_v4sf_v4sf;

      if (d->icode == CODE_FOR_sse2_maskcmpv2df3
          || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
        type = v2di_ftype_v2df_v2df;

      def_builtin (d->mask, d->name, type, d->code);
    }
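
/* Worked example (explanatory note, not in the original source): for
   "__builtin_ia32_cmpeqps" the icode is CODE_FOR_sse_maskcmpv4sf3, whose
   operand[1] mode is V4SFmode, so the switch above first selects
   v4sf_ftype_v4sf_v4sf; the comparison override then replaces it with
   v4si_ftype_v4sf_v4sf, because the compare produces an all-ones/all-zeros
   integer mask per element rather than floating-point values.  */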

  /* Add all builtins that are more or less simple operations on 1 operand.  */
  for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      enum machine_mode mode;
      tree type;

      if (d->name == 0)
        continue;
      mode = insn_data[d->icode].operand[1].mode;

      switch (mode)
        {
        case V16QImode:
          type = v16qi_ftype_v16qi;
          break;
        case V8HImode:
          type = v8hi_ftype_v8hi;
          break;
        case V4SImode:
          type = v4si_ftype_v4si;
          break;
        case V2DFmode:
          type = v2df_ftype_v2df;
          break;
        case V4SFmode:
          type = v4sf_ftype_v4sf;
          break;
        case V8QImode:
          type = v8qi_ftype_v8qi;
          break;
        case V4HImode:
          type = v4hi_ftype_v4hi;
          break;
        case V2SImode:
          type = v2si_ftype_v2si;
          break;

        default:
          gcc_unreachable ();
        }

      def_builtin (d->mask, d->name, type, d->code);
    }

  /* Add the remaining MMX insns with somewhat more complicated types.  */
  def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
  def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
  def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
  def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);

  def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
  def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
  def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);

  def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
  def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);

  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
  def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);

  /* comi/ucomi insns.  */
  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    if (d->mask == MASK_SSE2)
      def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
    else
      def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);

  def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
  def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
  def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);

  def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
  def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
  def_builtin_const (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
  def_builtin_const (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
  def_builtin_const (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
  def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
  def_builtin_const (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
  def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
  def_builtin_const (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
  def_builtin_const (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
  def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);

  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);

  def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
  def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);

  def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
  def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
  def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
  def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);

  def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
  def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);

  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);

  def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);

  def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
  def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
  def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
  def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
  def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
  def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);

  def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
16493 /* Original 3DNow! */
16494 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
16495 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
16496 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
16497 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
16498 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
16499 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
16500 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
16501 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
16502 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
16503 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
16504 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
16505 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
16506 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
16507 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
16508 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
16509 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
16510 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
16511 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
16512 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
16513 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
16515 /* 3DNow! extension as used in the Athlon CPU. */
16516 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
16517 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
16518 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
16519 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
16520 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
16521 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
16524 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
16526 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
16527 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
16529 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
16530 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
16532 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
16533 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
16534 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
16535 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
16536 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
16538 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
16539 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
16540 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
16541 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
16543 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
16544 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
16546 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
16548 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
16549 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
16551 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
16552 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
16553 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
16554 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
16555 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
16557 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
16559 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
16560 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
16561 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
16562 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
16564 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
16565 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
16566 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
16568 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
16569 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
16570 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
16571 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
16573 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
16574 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
16575 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
16577 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
16578 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
16580 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
16581 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
16583 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
16584 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
16585 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
16587 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
16588 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
16589 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
16591 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
16592 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
16594 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
16595 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
16596 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
16597 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
16599 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
16600 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
16601 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
16602 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
16604 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
16605 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
16607 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
16609 /* Prescott New Instructions. */
16610 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
16611 void_ftype_pcvoid_unsigned_unsigned,
16612 IX86_BUILTIN_MONITOR);
16613 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
16614 void_ftype_unsigned_unsigned,
16615 IX86_BUILTIN_MWAIT);
16616 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
16618 IX86_BUILTIN_MOVSHDUP);
16619 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
16621 IX86_BUILTIN_MOVSLDUP);
16622 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
16623 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
16626 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
16627 v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
16628 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int,
16629 IX86_BUILTIN_PALIGNR);
16631 /* Access to the vec_init patterns. */
16632 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
16633 integer_type_node, NULL_TREE);
16634 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
16635 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
16637 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
16638 short_integer_type_node,
16639 short_integer_type_node,
16640 short_integer_type_node, NULL_TREE);
16641 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
16642 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
16644 ftype = build_function_type_list (V8QI_type_node, char_type_node,
16645 char_type_node, char_type_node,
16646 char_type_node, char_type_node,
16647 char_type_node, char_type_node,
16648 char_type_node, NULL_TREE);
16649 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
16650 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
16652 /* Access to the vec_extract patterns. */
16653 ftype = build_function_type_list (double_type_node, V2DF_type_node,
16654 integer_type_node, NULL_TREE);
16655 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
16656 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
16658 ftype = build_function_type_list (long_long_integer_type_node,
16659 V2DI_type_node, integer_type_node,
16661 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
16662 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
16664 ftype = build_function_type_list (float_type_node, V4SF_type_node,
16665 integer_type_node, NULL_TREE);
16666 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
16667 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
16669 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
16670 integer_type_node, NULL_TREE);
16671 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
16672 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
16674 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
16675 integer_type_node, NULL_TREE);
16676 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
16677 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
16679 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
16680 integer_type_node, NULL_TREE);
16681 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
16682 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
16684 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
16685 integer_type_node, NULL_TREE);
16686 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
16687 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
16689 /* Access to the vec_set patterns. */
16690 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
16692 integer_type_node, NULL_TREE);
16693 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
16694 ftype, IX86_BUILTIN_VEC_SET_V8HI);
16696 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
16698 integer_type_node, NULL_TREE);
16699 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
16700 ftype, IX86_BUILTIN_VEC_SET_V4HI);
16703 /* Errors in the source file can cause expand_expr to return const0_rtx
16704 where we expect a vector. To avoid crashing, use one of the vector
16705 clear instructions. */
16707 safe_vector_operand (rtx x, enum machine_mode mode)
16709 if (x == const0_rtx)
16710 x = CONST0_RTX (mode);
16714 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
16717 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
16720 tree arg0 = TREE_VALUE (arglist);
16721 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16722 rtx op0 = expand_normal (arg0);
16723 rtx op1 = expand_normal (arg1);
16724 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16725 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16726 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
16728 if (VECTOR_MODE_P (mode0))
16729 op0 = safe_vector_operand (op0, mode0);
16730 if (VECTOR_MODE_P (mode1))
16731 op1 = safe_vector_operand (op1, mode1);
16733 if (optimize || !target
16734 || GET_MODE (target) != tmode
16735 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16736 target = gen_reg_rtx (tmode);
16738 if (GET_MODE (op1) == SImode && mode1 == TImode)
16740 rtx x = gen_reg_rtx (V4SImode);
16741 emit_insn (gen_sse2_loadd (x, op1));
16742 op1 = gen_lowpart (TImode, x);
16745 /* The insn must want input operands in the same modes as the
16747 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
16748 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
16750 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16751 op0 = copy_to_mode_reg (mode0, op0);
16752 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16753 op1 = copy_to_mode_reg (mode1, op1);
16755 /* ??? Using ix86_fixup_binary_operands is problematic when
16756 we've got mismatched modes. Fake it. */
16762 if (tmode == mode0 && tmode == mode1)
16764 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
16768 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
16770 op0 = force_reg (mode0, op0);
16771 op1 = force_reg (mode1, op1);
16772 target = gen_reg_rtx (tmode);
16775 pat = GEN_FCN (icode) (target, op0, op1);
16782 /* Subroutine of ix86_expand_builtin to take care of stores. */
16785 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
16788 tree arg0 = TREE_VALUE (arglist);
16789 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16790 rtx op0 = expand_normal (arg0);
16791 rtx op1 = expand_normal (arg1);
16792 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
16793 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
16795 if (VECTOR_MODE_P (mode1))
16796 op1 = safe_vector_operand (op1, mode1);
16798 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16799 op1 = copy_to_mode_reg (mode1, op1);
16801 pat = GEN_FCN (icode) (op0, op1);
16807 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
16810 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
16811 rtx target, int do_load)
16814 tree arg0 = TREE_VALUE (arglist);
16815 rtx op0 = expand_normal (arg0);
16816 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16817 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16819 if (optimize || !target
16820 || GET_MODE (target) != tmode
16821 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16822 target = gen_reg_rtx (tmode);
16824 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16827 if (VECTOR_MODE_P (mode0))
16828 op0 = safe_vector_operand (op0, mode0);
16830 if ((optimize && !register_operand (op0, mode0))
16831 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16832 op0 = copy_to_mode_reg (mode0, op0);
16835 pat = GEN_FCN (icode) (target, op0);
16842 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
16843 sqrtss, rsqrtss, rcpss. */
16846 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
16849 tree arg0 = TREE_VALUE (arglist);
16850 rtx op1, op0 = expand_normal (arg0);
16851 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16852 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16854 if (optimize || !target
16855 || GET_MODE (target) != tmode
16856 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16857 target = gen_reg_rtx (tmode);
16859 if (VECTOR_MODE_P (mode0))
16860 op0 = safe_vector_operand (op0, mode0);
16862 if ((optimize && !register_operand (op0, mode0))
16863 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16864 op0 = copy_to_mode_reg (mode0, op0);
16867 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
16868 op1 = copy_to_mode_reg (mode0, op1);
16870 pat = GEN_FCN (icode) (target, op0, op1);
16877 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
16880 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
16884 tree arg0 = TREE_VALUE (arglist);
16885 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16886 rtx op0 = expand_normal (arg0);
16887 rtx op1 = expand_normal (arg1);
16889 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
16890 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
16891 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
16892 enum rtx_code comparison = d->comparison;
16894 if (VECTOR_MODE_P (mode0))
16895 op0 = safe_vector_operand (op0, mode0);
16896 if (VECTOR_MODE_P (mode1))
16897 op1 = safe_vector_operand (op1, mode1);
16899 /* Swap operands if we have a comparison that isn't available in
16901 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16903 rtx tmp = gen_reg_rtx (mode1);
16904 emit_move_insn (tmp, op1);
16909 if (optimize || !target
16910 || GET_MODE (target) != tmode
16911 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
16912 target = gen_reg_rtx (tmode);
16914 if ((optimize && !register_operand (op0, mode0))
16915 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
16916 op0 = copy_to_mode_reg (mode0, op0);
16917 if ((optimize && !register_operand (op1, mode1))
16918 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
16919 op1 = copy_to_mode_reg (mode1, op1);
16921 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16922 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
16929 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
16932 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
16936 tree arg0 = TREE_VALUE (arglist);
16937 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16938 rtx op0 = expand_normal (arg0);
16939 rtx op1 = expand_normal (arg1);
16941 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
16942 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
16943 enum rtx_code comparison = d->comparison;
16945 if (VECTOR_MODE_P (mode0))
16946 op0 = safe_vector_operand (op0, mode0);
16947 if (VECTOR_MODE_P (mode1))
16948 op1 = safe_vector_operand (op1, mode1);
16950 /* Swap operands if we have a comparison that isn't available in
16952 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16959 target = gen_reg_rtx (SImode);
16960 emit_move_insn (target, const0_rtx);
16961 target = gen_rtx_SUBREG (QImode, target, 0);
16963 if ((optimize && !register_operand (op0, mode0))
16964 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
16965 op0 = copy_to_mode_reg (mode0, op0);
16966 if ((optimize && !register_operand (op1, mode1))
16967 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
16968 op1 = copy_to_mode_reg (mode1, op1);
16970 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16971 pat = GEN_FCN (d->icode) (op0, op1);
16975 emit_insn (gen_rtx_SET (VOIDmode,
16976 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
16977 gen_rtx_fmt_ee (comparison, QImode,
16981 return SUBREG_REG (target);
16984 /* Return the integer constant in ARG. Constrain it to be in the range
16985 of the subparts of VEC_TYPE; issue an error if not. */
16988 get_element_number (tree vec_type, tree arg)
16990 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
16992 if (!host_integerp (arg, 1)
16993 || (elt = tree_low_cst (arg, 1), elt > max))
16995 error ("selector must be an integer constant in the range 0..%wi", max);
17002 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17003 ix86_expand_vector_init. We DO have language-level syntax for this, in
17004 the form of (type){ init-list }. Except that since we can't place emms
17005 instructions from inside the compiler, we can't allow the use of MMX
17006 registers unless the user explicitly asks for it. So we do *not* define
17007 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
17008 we have builtins invoked by mmintrin.h that gives us license to emit
17009 these sorts of instructions. */
17012 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
17014 enum machine_mode tmode = TYPE_MODE (type);
17015 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
17016 int i, n_elt = GET_MODE_NUNITS (tmode);
17017 rtvec v = rtvec_alloc (n_elt);
17019 gcc_assert (VECTOR_MODE_P (tmode));
17021 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
17023 rtx x = expand_normal (TREE_VALUE (arglist));
17024 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
17027 gcc_assert (arglist == NULL);
17029 if (!target || !register_operand (target, tmode))
17030 target = gen_reg_rtx (tmode);
17032 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
17036 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17037 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
17038 had a language-level syntax for referencing vector elements. */
17041 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
17043 enum machine_mode tmode, mode0;
17048 arg0 = TREE_VALUE (arglist);
17049 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17051 op0 = expand_normal (arg0);
17052 elt = get_element_number (TREE_TYPE (arg0), arg1);
17054 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17055 mode0 = TYPE_MODE (TREE_TYPE (arg0));
17056 gcc_assert (VECTOR_MODE_P (mode0));
17058 op0 = force_reg (mode0, op0);
17060 if (optimize || !target || !register_operand (target, tmode))
17061 target = gen_reg_rtx (tmode);
17063 ix86_expand_vector_extract (true, target, op0, elt);
17068 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17069 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
17070 a language-level syntax for referencing vector elements. */
17073 ix86_expand_vec_set_builtin (tree arglist)
17075 enum machine_mode tmode, mode1;
17076 tree arg0, arg1, arg2;
17080 arg0 = TREE_VALUE (arglist);
17081 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17082 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17084 tmode = TYPE_MODE (TREE_TYPE (arg0));
17085 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17086 gcc_assert (VECTOR_MODE_P (tmode));
17088 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
17089 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
17090 elt = get_element_number (TREE_TYPE (arg0), arg2);
17092 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
17093 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
17095 op0 = force_reg (tmode, op0);
17096 op1 = force_reg (mode1, op1);
17098 ix86_expand_vector_set (true, op0, op1, elt);
17103 /* Expand an expression EXP that calls a built-in function,
17104 with result going to TARGET if that's convenient
17105 (and in mode MODE if that's convenient).
17106 SUBTARGET may be used as the target for computing one of EXP's operands.
17107 IGNORE is nonzero if the value is to be ignored. */
17110 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
17111 enum machine_mode mode ATTRIBUTE_UNUSED,
17112 int ignore ATTRIBUTE_UNUSED)
17114 const struct builtin_description *d;
17116 enum insn_code icode;
17117 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
17118 tree arglist = TREE_OPERAND (exp, 1);
17119 tree arg0, arg1, arg2;
17120 rtx op0, op1, op2, pat;
17121 enum machine_mode tmode, mode0, mode1, mode2, mode3;
17122 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
17126 case IX86_BUILTIN_EMMS:
17127 emit_insn (gen_mmx_emms ());
17130 case IX86_BUILTIN_SFENCE:
17131 emit_insn (gen_sse_sfence ());
17134 case IX86_BUILTIN_MASKMOVQ:
17135 case IX86_BUILTIN_MASKMOVDQU:
17136 icode = (fcode == IX86_BUILTIN_MASKMOVQ
17137 ? CODE_FOR_mmx_maskmovq
17138 : CODE_FOR_sse2_maskmovdqu);
17139 /* Note the arg order is different from the operand order. */
17140 arg1 = TREE_VALUE (arglist);
17141 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
17142 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17143 op0 = expand_normal (arg0);
17144 op1 = expand_normal (arg1);
17145 op2 = expand_normal (arg2);
17146 mode0 = insn_data[icode].operand[0].mode;
17147 mode1 = insn_data[icode].operand[1].mode;
17148 mode2 = insn_data[icode].operand[2].mode;
17150 op0 = force_reg (Pmode, op0);
17151 op0 = gen_rtx_MEM (mode1, op0);
17153 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
17154 op0 = copy_to_mode_reg (mode0, op0);
17155 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
17156 op1 = copy_to_mode_reg (mode1, op1);
17157 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
17158 op2 = copy_to_mode_reg (mode2, op2);
17159 pat = GEN_FCN (icode) (op0, op1, op2);
17165 case IX86_BUILTIN_SQRTSS:
17166 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
17167 case IX86_BUILTIN_RSQRTSS:
17168 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
17169 case IX86_BUILTIN_RCPSS:
17170 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
17172 case IX86_BUILTIN_LOADUPS:
17173 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
17175 case IX86_BUILTIN_STOREUPS:
17176 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
17178 case IX86_BUILTIN_LOADHPS:
17179 case IX86_BUILTIN_LOADLPS:
17180 case IX86_BUILTIN_LOADHPD:
17181 case IX86_BUILTIN_LOADLPD:
17182 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
17183 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
17184 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
17185 : CODE_FOR_sse2_loadlpd);
17186 arg0 = TREE_VALUE (arglist);
17187 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17188 op0 = expand_normal (arg0);
17189 op1 = expand_normal (arg1);
17190 tmode = insn_data[icode].operand[0].mode;
17191 mode0 = insn_data[icode].operand[1].mode;
17192 mode1 = insn_data[icode].operand[2].mode;
17194 op0 = force_reg (mode0, op0);
17195 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
17196 if (optimize || target == 0
17197 || GET_MODE (target) != tmode
17198 || !register_operand (target, tmode))
17199 target = gen_reg_rtx (tmode);
17200 pat = GEN_FCN (icode) (target, op0, op1);
17206 case IX86_BUILTIN_STOREHPS:
17207 case IX86_BUILTIN_STORELPS:
17208 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
17209 : CODE_FOR_sse_storelps);
17210 arg0 = TREE_VALUE (arglist);
17211 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17212 op0 = expand_normal (arg0);
17213 op1 = expand_normal (arg1);
17214 mode0 = insn_data[icode].operand[0].mode;
17215 mode1 = insn_data[icode].operand[1].mode;
17217 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
17218 op1 = force_reg (mode1, op1);
17220 pat = GEN_FCN (icode) (op0, op1);
17226 case IX86_BUILTIN_MOVNTPS:
17227 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
17228 case IX86_BUILTIN_MOVNTQ:
17229 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
17231 case IX86_BUILTIN_LDMXCSR:
17232 op0 = expand_normal (TREE_VALUE (arglist));
17233 target = assign_386_stack_local (SImode, SLOT_TEMP);
17234 emit_move_insn (target, op0);
17235 emit_insn (gen_sse_ldmxcsr (target));
17238 case IX86_BUILTIN_STMXCSR:
17239 target = assign_386_stack_local (SImode, SLOT_TEMP);
17240 emit_insn (gen_sse_stmxcsr (target));
17241 return copy_to_mode_reg (SImode, target);
17243 case IX86_BUILTIN_SHUFPS:
17244 case IX86_BUILTIN_SHUFPD:
17245 icode = (fcode == IX86_BUILTIN_SHUFPS
17246 ? CODE_FOR_sse_shufps
17247 : CODE_FOR_sse2_shufpd);
17248 arg0 = TREE_VALUE (arglist);
17249 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17250 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17251 op0 = expand_normal (arg0);
17252 op1 = expand_normal (arg1);
17253 op2 = expand_normal (arg2);
17254 tmode = insn_data[icode].operand[0].mode;
17255 mode0 = insn_data[icode].operand[1].mode;
17256 mode1 = insn_data[icode].operand[2].mode;
17257 mode2 = insn_data[icode].operand[3].mode;
17259 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
17260 op0 = copy_to_mode_reg (mode0, op0);
17261 if ((optimize && !register_operand (op1, mode1))
17262 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
17263 op1 = copy_to_mode_reg (mode1, op1);
17264 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
17266 /* @@@ better error message */
17267 error ("mask must be an immediate");
17268 return gen_reg_rtx (tmode);
17270 if (optimize || target == 0
17271 || GET_MODE (target) != tmode
17272 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17273 target = gen_reg_rtx (tmode);
17274 pat = GEN_FCN (icode) (target, op0, op1, op2);
17280 case IX86_BUILTIN_PSHUFW:
17281 case IX86_BUILTIN_PSHUFD:
17282 case IX86_BUILTIN_PSHUFHW:
17283 case IX86_BUILTIN_PSHUFLW:
17284 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
17285 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
17286 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
17287 : CODE_FOR_mmx_pshufw);
17288 arg0 = TREE_VALUE (arglist);
17289 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17290 op0 = expand_normal (arg0);
17291 op1 = expand_normal (arg1);
17292 tmode = insn_data[icode].operand[0].mode;
17293 mode1 = insn_data[icode].operand[1].mode;
17294 mode2 = insn_data[icode].operand[2].mode;
17296 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17297 op0 = copy_to_mode_reg (mode1, op0);
17298 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17300 /* @@@ better error message */
17301 error ("mask must be an immediate");
17305 || GET_MODE (target) != tmode
17306 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17307 target = gen_reg_rtx (tmode);
17308 pat = GEN_FCN (icode) (target, op0, op1);
17314 case IX86_BUILTIN_PSLLDQI128:
17315 case IX86_BUILTIN_PSRLDQI128:
17316 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
17317 : CODE_FOR_sse2_lshrti3);
17318 arg0 = TREE_VALUE (arglist);
17319 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17320 op0 = expand_normal (arg0);
17321 op1 = expand_normal (arg1);
17322 tmode = insn_data[icode].operand[0].mode;
17323 mode1 = insn_data[icode].operand[1].mode;
17324 mode2 = insn_data[icode].operand[2].mode;
17326 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17328 op0 = copy_to_reg (op0);
17329 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17331 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17333 error ("shift must be an immediate");
17336 target = gen_reg_rtx (V2DImode);
17337 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
17343 case IX86_BUILTIN_FEMMS:
17344 emit_insn (gen_mmx_femms ());
17347 case IX86_BUILTIN_PAVGUSB:
17348 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
17350 case IX86_BUILTIN_PF2ID:
17351 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
17353 case IX86_BUILTIN_PFACC:
17354 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
17356 case IX86_BUILTIN_PFADD:
17357 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
17359 case IX86_BUILTIN_PFCMPEQ:
17360 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
17362 case IX86_BUILTIN_PFCMPGE:
17363 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
17365 case IX86_BUILTIN_PFCMPGT:
17366 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
17368 case IX86_BUILTIN_PFMAX:
17369 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
17371 case IX86_BUILTIN_PFMIN:
17372 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
17374 case IX86_BUILTIN_PFMUL:
17375 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
17377 case IX86_BUILTIN_PFRCP:
17378 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
17380 case IX86_BUILTIN_PFRCPIT1:
17381 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
17383 case IX86_BUILTIN_PFRCPIT2:
17384 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
17386 case IX86_BUILTIN_PFRSQIT1:
17387 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
17389 case IX86_BUILTIN_PFRSQRT:
17390 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
17392 case IX86_BUILTIN_PFSUB:
17393 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
17395 case IX86_BUILTIN_PFSUBR:
17396 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
17398 case IX86_BUILTIN_PI2FD:
17399 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
17401 case IX86_BUILTIN_PMULHRW:
17402 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
17404 case IX86_BUILTIN_PF2IW:
17405 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
17407 case IX86_BUILTIN_PFNACC:
17408 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
17410 case IX86_BUILTIN_PFPNACC:
17411 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
17413 case IX86_BUILTIN_PI2FW:
17414 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
17416 case IX86_BUILTIN_PSWAPDSI:
17417 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
17419 case IX86_BUILTIN_PSWAPDSF:
17420 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
17422 case IX86_BUILTIN_SQRTSD:
17423 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
17424 case IX86_BUILTIN_LOADUPD:
17425 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
17426 case IX86_BUILTIN_STOREUPD:
17427 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
17429 case IX86_BUILTIN_MFENCE:
17430 emit_insn (gen_sse2_mfence ());
17432 case IX86_BUILTIN_LFENCE:
17433 emit_insn (gen_sse2_lfence ());
17436 case IX86_BUILTIN_CLFLUSH:
17437 arg0 = TREE_VALUE (arglist);
17438 op0 = expand_normal (arg0);
17439 icode = CODE_FOR_sse2_clflush;
17440 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
17441 op0 = copy_to_mode_reg (Pmode, op0);
17443 emit_insn (gen_sse2_clflush (op0));
17446 case IX86_BUILTIN_MOVNTPD:
17447 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
17448 case IX86_BUILTIN_MOVNTDQ:
17449 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
17450 case IX86_BUILTIN_MOVNTI:
17451 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
17453 case IX86_BUILTIN_LOADDQU:
17454 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
17455 case IX86_BUILTIN_STOREDQU:
17456 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
17458 case IX86_BUILTIN_MONITOR:
17459 arg0 = TREE_VALUE (arglist);
17460 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17461 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17462 op0 = expand_normal (arg0);
17463 op1 = expand_normal (arg1);
17464 op2 = expand_normal (arg2);
17466 op0 = copy_to_mode_reg (Pmode, op0);
17468 op1 = copy_to_mode_reg (SImode, op1);
17470 op2 = copy_to_mode_reg (SImode, op2);
17472 emit_insn (gen_sse3_monitor (op0, op1, op2));
17474 emit_insn (gen_sse3_monitor64 (op0, op1, op2));
17477 case IX86_BUILTIN_MWAIT:
17478 arg0 = TREE_VALUE (arglist);
17479 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17480 op0 = expand_normal (arg0);
17481 op1 = expand_normal (arg1);
17483 op0 = copy_to_mode_reg (SImode, op0);
17485 op1 = copy_to_mode_reg (SImode, op1);
17486 emit_insn (gen_sse3_mwait (op0, op1));
17489 case IX86_BUILTIN_LDDQU:
17490 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
17493 case IX86_BUILTIN_PALIGNR:
17494 case IX86_BUILTIN_PALIGNR128:
17495 if (fcode == IX86_BUILTIN_PALIGNR)
17497 icode = CODE_FOR_ssse3_palignrdi;
17502 icode = CODE_FOR_ssse3_palignrti;
17505 arg0 = TREE_VALUE (arglist);
17506 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17507 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17508 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
17509 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
17510 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
17511 tmode = insn_data[icode].operand[0].mode;
17512 mode1 = insn_data[icode].operand[1].mode;
17513 mode2 = insn_data[icode].operand[2].mode;
17514 mode3 = insn_data[icode].operand[3].mode;
17516 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17518 op0 = copy_to_reg (op0);
17519 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17521 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17523 op1 = copy_to_reg (op1);
17524 op1 = simplify_gen_subreg (mode2, op1, GET_MODE (op1), 0);
17526 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
17528 error ("shift must be an immediate");
17531 target = gen_reg_rtx (mode);
17532 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, mode, 0),
17539 case IX86_BUILTIN_VEC_INIT_V2SI:
17540 case IX86_BUILTIN_VEC_INIT_V4HI:
17541 case IX86_BUILTIN_VEC_INIT_V8QI:
17542 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
17544 case IX86_BUILTIN_VEC_EXT_V2DF:
17545 case IX86_BUILTIN_VEC_EXT_V2DI:
17546 case IX86_BUILTIN_VEC_EXT_V4SF:
17547 case IX86_BUILTIN_VEC_EXT_V4SI:
17548 case IX86_BUILTIN_VEC_EXT_V8HI:
17549 case IX86_BUILTIN_VEC_EXT_V2SI:
17550 case IX86_BUILTIN_VEC_EXT_V4HI:
17551 return ix86_expand_vec_ext_builtin (arglist, target);
17553 case IX86_BUILTIN_VEC_SET_V8HI:
17554 case IX86_BUILTIN_VEC_SET_V4HI:
17555 return ix86_expand_vec_set_builtin (arglist);
17561 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17562 if (d->code == fcode)
17564 /* Compares are treated specially. */
17565 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
17566 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
17567 || d->icode == CODE_FOR_sse2_maskcmpv2df3
17568 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
17569 return ix86_expand_sse_compare (d, arglist, target);
17571 return ix86_expand_binop_builtin (d->icode, arglist, target);
17574 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17575 if (d->code == fcode)
17576 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
17578 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
17579 if (d->code == fcode)
17580 return ix86_expand_sse_comi (d, arglist, target);
17582 gcc_unreachable ();
17585 /* Returns a function decl for a vectorized version of the builtin function
17586 with builtin function code FN and the result vector type TYPE, or NULL_TREE
17587 if it is not available. */
17590 ix86_builtin_vectorized_function (enum built_in_function fn, tree type)
17592 enum machine_mode el_mode;
17595 if (TREE_CODE (type) != VECTOR_TYPE)
17598 el_mode = TYPE_MODE (TREE_TYPE (type));
17599 n = TYPE_VECTOR_SUBPARTS (type);
17603 case BUILT_IN_SQRT:
17604 if (el_mode == DFmode && n == 2)
17605 return ix86_builtins[IX86_BUILTIN_SQRTPD];
17608 case BUILT_IN_SQRTF:
17609 if (el_mode == SFmode && n == 4)
17610 return ix86_builtins[IX86_BUILTIN_SQRTPS];
17620 /* Store OPERAND to the memory after reload is completed. This means
17621 that we can't easily use assign_stack_local. */
17623 ix86_force_to_memory (enum machine_mode mode, rtx operand)
17627 gcc_assert (reload_completed);
17628 if (TARGET_RED_ZONE)
17630 result = gen_rtx_MEM (mode,
17631 gen_rtx_PLUS (Pmode,
17633 GEN_INT (-RED_ZONE_SIZE)));
17634 emit_move_insn (result, operand);
17636 else if (!TARGET_RED_ZONE && TARGET_64BIT)
17642 operand = gen_lowpart (DImode, operand);
17646 gen_rtx_SET (VOIDmode,
17647 gen_rtx_MEM (DImode,
17648 gen_rtx_PRE_DEC (DImode,
17649 stack_pointer_rtx)),
17653 gcc_unreachable ();
17655 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17664 split_di (&operand, 1, operands, operands + 1);
17666 gen_rtx_SET (VOIDmode,
17667 gen_rtx_MEM (SImode,
17668 gen_rtx_PRE_DEC (Pmode,
17669 stack_pointer_rtx)),
17672 gen_rtx_SET (VOIDmode,
17673 gen_rtx_MEM (SImode,
17674 gen_rtx_PRE_DEC (Pmode,
17675 stack_pointer_rtx)),
17680 /* Store HImodes as SImodes. */
17681 operand = gen_lowpart (SImode, operand);
17685 gen_rtx_SET (VOIDmode,
17686 gen_rtx_MEM (GET_MODE (operand),
17687 gen_rtx_PRE_DEC (SImode,
17688 stack_pointer_rtx)),
17692 gcc_unreachable ();
17694 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17699 /* Free operand from the memory. */
17701 ix86_free_from_memory (enum machine_mode mode)
17703 if (!TARGET_RED_ZONE)
17707 if (mode == DImode || TARGET_64BIT)
17711 /* Use LEA to deallocate stack space. In peephole2 it will be converted
17712 to pop or add instruction if registers are available. */
17713 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
17714 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
17719 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
17720 QImode must go into class Q_REGS.
17721 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
17722 movdf to do mem-to-mem moves through integer regs. */
17724 ix86_preferred_reload_class (rtx x, enum reg_class class)
17726 enum machine_mode mode = GET_MODE (x);
17728 /* We're only allowed to return a subclass of CLASS. Many of the
17729 following checks fail for NO_REGS, so eliminate that early. */
17730 if (class == NO_REGS)
17733 /* All classes can load zeros. */
17734 if (x == CONST0_RTX (mode))
17737 /* Force constants into memory if we are loading a (nonzero) constant into
17738 an MMX or SSE register. This is because there are no MMX/SSE instructions
17739 to load from a constant. */
17741 && (MAYBE_MMX_CLASS_P (class) || MAYBE_SSE_CLASS_P (class)))
17744 /* Prefer SSE regs only, if we can use them for math. */
17745 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
17746 return SSE_CLASS_P (class) ? class : NO_REGS;
17748 /* Floating-point constants need more complex checks. */
17749 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
17751 /* General regs can load everything. */
17752 if (reg_class_subset_p (class, GENERAL_REGS))
17755 /* Floats can load 0 and 1 plus some others. Note that we eliminated
17756 zero above. We only want to wind up preferring 80387 registers if
17757 we plan on doing computation with them. */
17759 && standard_80387_constant_p (x))
17761 /* Limit class to non-sse. */
17762 if (class == FLOAT_SSE_REGS)
17764 if (class == FP_TOP_SSE_REGS)
17766 if (class == FP_SECOND_SSE_REGS)
17767 return FP_SECOND_REG;
17768 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
17775 /* Generally when we see PLUS here, it's the function invariant
17776 (plus soft-fp const_int). Which can only be computed into general
17778 if (GET_CODE (x) == PLUS)
17779 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
17781 /* QImode constants are easy to load, but non-constant QImode data
17782 must go into Q_REGS. */
17783 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
17785 if (reg_class_subset_p (class, Q_REGS))
17787 if (reg_class_subset_p (Q_REGS, class))
17795 /* Discourage putting floating-point values in SSE registers unless
17796 SSE math is being used, and likewise for the 387 registers. */
17798 ix86_preferred_output_reload_class (rtx x, enum reg_class class)
17800 enum machine_mode mode = GET_MODE (x);
17802 /* Restrict the output reload class to the register bank that we are doing
17803 math on. If we would like not to return a subset of CLASS, reject this
17804 alternative: if reload cannot do this, it will still use its choice. */
17805 mode = GET_MODE (x);
17806 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
17807 return MAYBE_SSE_CLASS_P (class) ? SSE_REGS : NO_REGS;
17809 if (TARGET_80387 && SCALAR_FLOAT_MODE_P (mode))
17811 if (class == FP_TOP_SSE_REGS)
17813 else if (class == FP_SECOND_SSE_REGS)
17814 return FP_SECOND_REG;
17816 return FLOAT_CLASS_P (class) ? class : NO_REGS;
17822 /* If we are copying between general and FP registers, we need a memory
17823 location. The same is true for SSE and MMX registers.
17825 The macro can't work reliably when one of the CLASSES is class containing
17826 registers from multiple units (SSE, MMX, integer). We avoid this by never
17827 combining those units in single alternative in the machine description.
17828 Ensure that this constraint holds to avoid unexpected surprises.
17830 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
17831 enforce these sanity checks. */
17834 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
17835 enum machine_mode mode, int strict)
17837 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
17838 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
17839 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
17840 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
17841 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
17842 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
17844 gcc_assert (!strict);
17848 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
17851 /* ??? This is a lie. We do have moves between mmx/general, and for
17852 mmx/sse2. But by saying we need secondary memory we discourage the
17853 register allocator from using the mmx registers unless needed. */
17854 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
17857 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17859 /* SSE1 doesn't have any direct moves from other classes. */
17863 /* If the target says that inter-unit moves are more expensive
17864 than moving through memory, then don't generate them. */
17865 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
17868 /* Between SSE and general, we have moves no larger than word size. */
17869 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
17872 /* ??? For the cost of one register reformat penalty, we could use
17873 the same instructions to move SFmode and DFmode data, but the
17874 relevant move patterns don't support those alternatives. */
17875 if (mode == SFmode || mode == DFmode)
17882 /* Return true if the registers in CLASS cannot represent the change from
17883 modes FROM to TO. */
17886 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
17887 enum reg_class class)
17892 /* x87 registers can't do subreg at all, as all values are reformatted
17893 to extended precision. */
17894 if (MAYBE_FLOAT_CLASS_P (class))
17897 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
17899 /* Vector registers do not support QI or HImode loads. If we don't
17900 disallow a change to these modes, reload will assume it's ok to
17901 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
17902 the vec_dupv4hi pattern. */
17903 if (GET_MODE_SIZE (from) < 4)
17906 /* Vector registers do not support subreg with nonzero offsets, which
17907 are otherwise valid for integer registers. Since we can't see
17908 whether we have a nonzero offset from here, prohibit all
17909 nonparadoxical subregs changing size. */
17910 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
17917 /* Return the cost of moving data from a register in class CLASS1 to
17918 one in class CLASS2.
17920 It is not required that the cost always equal 2 when FROM is the same as TO;
17921 on some machines it is expensive to move between registers if they are not
17922 general registers. */
17925 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
17926 enum reg_class class2)
17928 /* In case we require secondary memory, compute cost of the store followed
17929 by load. In order to avoid bad register allocation choices, we need
17930 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
17932 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
17936 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
17937 MEMORY_MOVE_COST (mode, class1, 1));
17938 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
17939 MEMORY_MOVE_COST (mode, class2, 1));
17941 /* In case of copying from general_purpose_register we may emit multiple
17942 stores followed by single load causing memory size mismatch stall.
17943 Count this as arbitrarily high cost of 20. */
17944 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
17947 /* In the case of FP/MMX moves, the registers actually overlap, and we
17948 have to switch modes in order to treat them differently. */
17949 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
17950 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
17956 /* Moves between SSE/MMX and integer unit are expensive. */
17957 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
17958 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17959 return ix86_cost->mmxsse_to_integer;
17960 if (MAYBE_FLOAT_CLASS_P (class1))
17961 return ix86_cost->fp_move;
17962 if (MAYBE_SSE_CLASS_P (class1))
17963 return ix86_cost->sse_move;
17964 if (MAYBE_MMX_CLASS_P (class1))
17965 return ix86_cost->mmx_move;
17969 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
17972 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
17974 /* Flags and only flags can only hold CCmode values. */
17975 if (CC_REGNO_P (regno))
17976 return GET_MODE_CLASS (mode) == MODE_CC;
17977 if (GET_MODE_CLASS (mode) == MODE_CC
17978 || GET_MODE_CLASS (mode) == MODE_RANDOM
17979 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
17981 if (FP_REGNO_P (regno))
17982 return VALID_FP_MODE_P (mode);
17983 if (SSE_REGNO_P (regno))
17985 /* We implement the move patterns for all vector modes into and
17986 out of SSE registers, even when no operation instructions
17988 return (VALID_SSE_REG_MODE (mode)
17989 || VALID_SSE2_REG_MODE (mode)
17990 || VALID_MMX_REG_MODE (mode)
17991 || VALID_MMX_REG_MODE_3DNOW (mode));
17993 if (MMX_REGNO_P (regno))
17995 /* We implement the move patterns for 3DNOW modes even in MMX mode,
17996 so if the register is available at all, then we can move data of
17997 the given mode into or out of it. */
17998 return (VALID_MMX_REG_MODE (mode)
17999 || VALID_MMX_REG_MODE_3DNOW (mode));
18002 if (mode == QImode)
18004 /* Take care for QImode values - they can be in non-QI regs,
18005 but then they do cause partial register stalls. */
18006 if (regno < 4 || TARGET_64BIT)
18008 if (!TARGET_PARTIAL_REG_STALL)
18010 return reload_in_progress || reload_completed;
18012 /* We handle both integer and floats in the general purpose registers. */
18013 else if (VALID_INT_MODE_P (mode))
18015 else if (VALID_FP_MODE_P (mode))
18017 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
18018 on to use that value in smaller contexts, this can easily force a
18019 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
18020 supporting DImode, allow it. */
18021 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
18027 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
18028 tieable integer mode. */
18031 ix86_tieable_integer_mode_p (enum machine_mode mode)
18040 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
18043 return TARGET_64BIT;
18050 /* Return true if MODE1 is accessible in a register that can hold MODE2
18051 without copying. That is, all register classes that can hold MODE2
18052 can also hold MODE1. */
18055 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
18057 if (mode1 == mode2)
18060 if (ix86_tieable_integer_mode_p (mode1)
18061 && ix86_tieable_integer_mode_p (mode2))
18064 /* MODE2 being XFmode implies fp stack or general regs, which means we
18065 can tie any smaller floating point modes to it. Note that we do not
18066 tie this with TFmode. */
18067 if (mode2 == XFmode)
18068 return mode1 == SFmode || mode1 == DFmode;
18070 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
18071 that we can tie it with SFmode. */
18072 if (mode2 == DFmode)
18073 return mode1 == SFmode;
18075 /* If MODE2 is only appropriate for an SSE register, then tie with
18076 any other mode acceptable to SSE registers. */
18077 if (GET_MODE_SIZE (mode2) >= 8
18078 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
18079 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
18081 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
18082 with any other mode acceptable to MMX registers. */
18083 if (GET_MODE_SIZE (mode2) == 8
18084 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
18085 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
18090 /* Return the cost of moving data of mode M between a
18091 register and memory. A value of 2 is the default; this cost is
18092 relative to those in `REGISTER_MOVE_COST'.
18094 If moving between registers and memory is more expensive than
18095 between two registers, you should define this macro to express the
18098 Model also increased moving costs of QImode registers in non
18102 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
18104 if (FLOAT_CLASS_P (class))
18121 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
18123 if (SSE_CLASS_P (class))
18126 switch (GET_MODE_SIZE (mode))
18140 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
18142 if (MMX_CLASS_P (class))
18145 switch (GET_MODE_SIZE (mode))
18156 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
18158 switch (GET_MODE_SIZE (mode))
18162 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
18163 : ix86_cost->movzbl_load);
18165 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
18166 : ix86_cost->int_store[0] + 4);
18169 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
18171 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
18172 if (mode == TFmode)
18174 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
18175 * (((int) GET_MODE_SIZE (mode)
18176 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
18180 /* Compute a (partial) cost for rtx X. Return true if the complete
18181 cost has been computed, and false if subexpressions should be
18182 scanned. In either case, *TOTAL contains the cost result. */
18185 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
18187 enum machine_mode mode = GET_MODE (x);
18195 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
18197 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
18199 else if (flag_pic && SYMBOLIC_CONST (x)
18201 || (!GET_CODE (x) != LABEL_REF
18202 && (GET_CODE (x) != SYMBOL_REF
18203 || !SYMBOL_REF_LOCAL_P (x)))))
18210 if (mode == VOIDmode)
18213 switch (standard_80387_constant_p (x))
18218 default: /* Other constants */
18223 /* Start with (MEM (SYMBOL_REF)), since that's where
18224 it'll probably end up. Add a penalty for size. */
18225 *total = (COSTS_N_INSNS (1)
18226 + (flag_pic != 0 && !TARGET_64BIT)
18227 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
18233 /* The zero extensions is often completely free on x86_64, so make
18234 it as cheap as possible. */
18235 if (TARGET_64BIT && mode == DImode
18236 && GET_MODE (XEXP (x, 0)) == SImode)
18238 else if (TARGET_ZERO_EXTEND_WITH_AND)
18239 *total = ix86_cost->add;
18241 *total = ix86_cost->movzx;
18245 *total = ix86_cost->movsx;
18249 if (GET_CODE (XEXP (x, 1)) == CONST_INT
18250 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
18252 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18255 *total = ix86_cost->add;
18258 if ((value == 2 || value == 3)
18259 && ix86_cost->lea <= ix86_cost->shift_const)
18261 *total = ix86_cost->lea;
18271 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
18273 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18275 if (INTVAL (XEXP (x, 1)) > 32)
18276 *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
18278 *total = ix86_cost->shift_const * 2;
18282 if (GET_CODE (XEXP (x, 1)) == AND)
18283 *total = ix86_cost->shift_var * 2;
18285 *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
18290 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18291 *total = ix86_cost->shift_const;
18293 *total = ix86_cost->shift_var;
18298 if (FLOAT_MODE_P (mode))
18300 *total = ix86_cost->fmul;
18305 rtx op0 = XEXP (x, 0);
18306 rtx op1 = XEXP (x, 1);
18308 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18310 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18311 for (nbits = 0; value != 0; value &= value - 1)
18315 /* This is arbitrary. */
18318 /* Compute costs correctly for widening multiplication. */
18319 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
18320 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
18321 == GET_MODE_SIZE (mode))
18323 int is_mulwiden = 0;
18324 enum machine_mode inner_mode = GET_MODE (op0);
18326 if (GET_CODE (op0) == GET_CODE (op1))
18327 is_mulwiden = 1, op1 = XEXP (op1, 0);
18328 else if (GET_CODE (op1) == CONST_INT)
18330 if (GET_CODE (op0) == SIGN_EXTEND)
18331 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
18332 == INTVAL (op1);
18333 else
18334 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
18338 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
18341 *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
18342 + nbits * ix86_cost->mult_bit
18343 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
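/* Worked example (illustrative): for (mult:SI (reg) (const_int 10)),
   10 == 0b1010 has two bits set, so nbits == 2 and the estimate is
   mult_init[MODE_INDEX (SImode)] + 2 * mult_bit plus the cost of the
   two operands.  */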
18352 if (FLOAT_MODE_P (mode))
18353 *total = ix86_cost->fdiv;
18355 *total = ix86_cost->divide[MODE_INDEX (mode)];
18359 if (FLOAT_MODE_P (mode))
18360 *total = ix86_cost->fadd;
18361 else if (GET_MODE_CLASS (mode) == MODE_INT
18362 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
18364 if (GET_CODE (XEXP (x, 0)) == PLUS
18365 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
18366 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
18367 && CONSTANT_P (XEXP (x, 1)))
18369 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
18370 if (val == 2 || val == 4 || val == 8)
18372 *total = ix86_cost->lea;
18373 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18374 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
18376 *total += rtx_cost (XEXP (x, 1), outer_code);
18380 else if (GET_CODE (XEXP (x, 0)) == MULT
18381 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
18383 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
18384 if (val == 2 || val == 4 || val == 8)
18386 *total = ix86_cost->lea;
18387 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18388 *total += rtx_cost (XEXP (x, 1), outer_code);
18392 else if (GET_CODE (XEXP (x, 0)) == PLUS)
18394 *total = ix86_cost->lea;
18395 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18396 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18397 *total += rtx_cost (XEXP (x, 1), outer_code);
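/* Illustrative example: (plus (plus (mult (reg) (const_int 4)) (reg))
   (const_int 12)) matches a single lea, e.g. "leal 12(%edx,%eax,4),%ecx",
   so it is costed as one lea plus its sub-operands instead of a
   shift and two adds.  */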
18404 if (FLOAT_MODE_P (mode))
18406 *total = ix86_cost->fadd;
18414 if (!TARGET_64BIT && mode == DImode)
18416 *total = (ix86_cost->add * 2
18417 + (rtx_cost (XEXP (x, 0), outer_code)
18418 << (GET_MODE (XEXP (x, 0)) != DImode))
18419 + (rtx_cost (XEXP (x, 1), outer_code)
18420 << (GET_MODE (XEXP (x, 1)) != DImode)));
18426 if (FLOAT_MODE_P (mode))
18428 *total = ix86_cost->fchs;
18434 if (!TARGET_64BIT && mode == DImode)
18435 *total = ix86_cost->add * 2;
18437 *total = ix86_cost->add;
18441 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
18442 && XEXP (XEXP (x, 0), 1) == const1_rtx
18443 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
18444 && XEXP (x, 1) == const0_rtx)
18446 /* This kind of construct is implemented using test[bwl].
18447 Treat it as if we had an AND. */
18448 *total = (ix86_cost->add
18449 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
18450 + rtx_cost (const1_rtx, outer_code));
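/* Illustrative example: the condition of "if (x & 0x20) ..." reaches
   here as (compare (zero_extract x 1 5) (const_int 0)), which is
   emitted as a testb with immediate 0x20, hence the AND-like costing
   above.  */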
18456 if (!TARGET_SSE_MATH
18457 || mode == XFmode
18458 || (mode == DFmode && !TARGET_SSE2))
18463 if (FLOAT_MODE_P (mode))
18464 *total = ix86_cost->fabs;
18468 if (FLOAT_MODE_P (mode))
18469 *total = ix86_cost->fsqrt;
18473 if (XINT (x, 1) == UNSPEC_TP)
18484 static int current_machopic_label_num;
18486 /* Given a symbol name and its associated stub, write out the
18487 definition of the stub. */
18490 machopic_output_stub (FILE *file, const char *symb, const char *stub)
18492 unsigned int length;
18493 char *binder_name, *symbol_name, lazy_ptr_name[32];
18494 int label = ++current_machopic_label_num;
18496 /* For 64-bit we shouldn't get here. */
18497 gcc_assert (!TARGET_64BIT);
18499 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
18500 symb = (*targetm.strip_name_encoding) (symb);
18502 length = strlen (stub);
18503 binder_name = alloca (length + 32);
18504 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
18506 length = strlen (symb);
18507 symbol_name = alloca (length + 32);
18508 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
18510 sprintf (lazy_ptr_name, "L%d$lz", label);
18513 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
18515 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
18517 fprintf (file, "%s:\n", stub);
18518 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18522 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
18523 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
18524 fprintf (file, "\tjmp\t*%%edx\n");
18527 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
18529 fprintf (file, "%s:\n", binder_name);
18533 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
18534 fprintf (file, "\tpushl\t%%eax\n");
18537 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
18539 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
18541 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
18542 fprintf (file, "%s:\n", lazy_ptr_name);
18543 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18544 fprintf (file, "\t.long %s\n", binder_name);
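/* For illustration (MACHOPIC_PURE case; the names are examples only),
   the output for a symbol "_foo" with stub "L_foo$stub" looks roughly
   like:

	L_foo$stub:
		.indirect_symbol _foo
		call	LPC$1
	LPC$1:	popl	%eax
		movl	L1$lz-LPC$1(%eax),%edx
		jmp	*%edx
	...
	L1$lz:
		.indirect_symbol _foo
		.long	<binder>

   The lazy pointer initially resolves through the binder, which calls
   dyld_stub_binding_helper to bind the real address on first use.  */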
18548 darwin_x86_file_end (void)
18550 darwin_file_end ();
18553 #endif /* TARGET_MACHO */
18555 /* Order the registers for register allocator. */
18558 x86_order_regs_for_local_alloc (void)
18563 /* First allocate the local general purpose registers. */
18564 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18565 if (GENERAL_REGNO_P (i) && call_used_regs[i])
18566 reg_alloc_order [pos++] = i;
18568 /* Global general purpose registers. */
18569 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18570 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
18571 reg_alloc_order [pos++] = i;
18573 /* x87 registers come first in case we are doing FP math
18574 using them.  */
18575 if (!TARGET_SSE_MATH)
18576 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18577 reg_alloc_order [pos++] = i;
18579 /* SSE registers. */
18580 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
18581 reg_alloc_order [pos++] = i;
18582 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
18583 reg_alloc_order [pos++] = i;
18585 /* x87 registers. */
18586 if (TARGET_SSE_MATH)
18587 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18588 reg_alloc_order [pos++] = i;
18590 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
18591 reg_alloc_order [pos++] = i;
18593 /* Initialize the rest of array as we do not allocate some registers
18594 at all.  */
18595 while (pos < FIRST_PSEUDO_REGISTER)
18596 reg_alloc_order [pos++] = 0;
18599 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
18600 struct attribute_spec.handler. */
18602 ix86_handle_struct_attribute (tree *node, tree name,
18603 tree args ATTRIBUTE_UNUSED,
18604 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
18607 if (DECL_P (*node))
18609 if (TREE_CODE (*node) == TYPE_DECL)
18610 type = &TREE_TYPE (*node);
18615 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
18616 || TREE_CODE (*type) == UNION_TYPE)))
18618 warning (OPT_Wattributes, "%qs attribute ignored",
18619 IDENTIFIER_POINTER (name));
18620 *no_add_attrs = true;
18623 else if ((is_attribute_p ("ms_struct", name)
18624 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
18625 || ((is_attribute_p ("gcc_struct", name)
18626 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
18628 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
18629 IDENTIFIER_POINTER (name));
18630 *no_add_attrs = true;
18637 ix86_ms_bitfield_layout_p (tree record_type)
18639 return (TARGET_MS_BITFIELD_LAYOUT &&
18640 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
18641 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
18644 /* Returns an expression indicating where the this parameter is
18645 located on entry to the FUNCTION. */
18648 x86_this_parameter (tree function)
18650 tree type = TREE_TYPE (function);
18654 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
18655 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
18658 if (ix86_function_regparm (type, function) > 0)
18662 parm = TYPE_ARG_TYPES (type);
18663 /* Figure out whether or not the function has a variable number of
18664 arguments.  */
18665 for (; parm; parm = TREE_CHAIN (parm))
18666 if (TREE_VALUE (parm) == void_type_node)
18668 /* If not, the this parameter is in the first argument. */
18672 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
18674 return gen_rtx_REG (SImode, regno);
18678 if (aggregate_value_p (TREE_TYPE (type), type))
18679 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
18681 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
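/* Illustrative summary of the cases above: with no regparm/fastcall,
   "this" is at 4(%esp), or at 8(%esp) when a hidden aggregate-return
   pointer is passed first; with regparm it arrives in %eax, with
   fastcall in %ecx; on 64-bit targets it is in the first (or second)
   integer parameter register.  */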
18684 /* Determine whether x86_output_mi_thunk can succeed. */
18687 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
18688 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
18689 HOST_WIDE_INT vcall_offset, tree function)
18691 /* 64-bit can handle anything. */
18695 /* For 32-bit, everything's fine if we have one free register. */
18696 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
18699 /* Need a free register for vcall_offset. */
18703 /* Need a free register for GOT references. */
18704 if (flag_pic && !(*targetm.binds_local_p) (function))
18707 /* Otherwise ok. */
18711 /* Output the assembler code for a thunk function. THUNK_DECL is the
18712 declaration for the thunk function itself, FUNCTION is the decl for
18713 the target function. DELTA is an immediate constant offset to be
18714 added to THIS. If VCALL_OFFSET is nonzero, the word at
18715 *(*this + vcall_offset) should be added to THIS. */
18718 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
18719 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
18720 HOST_WIDE_INT vcall_offset, tree function)
18723 rtx this = x86_this_parameter (function);
18726 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
18727 pull it in now and let DELTA benefit. */
18730 else if (vcall_offset)
18732 /* Put the this parameter into %eax. */
18734 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
18735 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18738 this_reg = NULL_RTX;
18740 /* Adjust the this parameter by a fixed constant. */
18743 xops[0] = GEN_INT (delta);
18744 xops[1] = this_reg ? this_reg : this;
18747 if (!x86_64_general_operand (xops[0], DImode))
18749 tmp = gen_rtx_REG (DImode, R10_REG);
18751 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
18755 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18758 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18761 /* Adjust the this parameter by a value stored in the vtable. */
18765 tmp = gen_rtx_REG (DImode, R10_REG);
18768 int tmp_regno = 2 /* ECX */;
18769 if (lookup_attribute ("fastcall",
18770 TYPE_ATTRIBUTES (TREE_TYPE (function))))
18771 tmp_regno = 0 /* EAX */;
18772 tmp = gen_rtx_REG (SImode, tmp_regno);
18775 xops[0] = gen_rtx_MEM (Pmode, this_reg);
18778 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18780 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18782 /* Adjust the this parameter. */
18783 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
18784 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
18786 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
18787 xops[0] = GEN_INT (vcall_offset);
18789 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18790 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
18792 xops[1] = this_reg;
18794 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18796 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18799 /* If necessary, drop THIS back to its stack slot. */
18800 if (this_reg && this_reg != this)
18802 xops[0] = this_reg;
18804 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18807 xops[0] = XEXP (DECL_RTL (function), 0);
18810 if (!flag_pic || (*targetm.binds_local_p) (function))
18811 output_asm_insn ("jmp\t%P0", xops);
18814 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
18815 tmp = gen_rtx_CONST (Pmode, tmp);
18816 tmp = gen_rtx_MEM (QImode, tmp);
18818 output_asm_insn ("jmp\t%A0", xops);
18823 if (!flag_pic || (*targetm.binds_local_p) (function))
18824 output_asm_insn ("jmp\t%P0", xops);
18829 rtx sym_ref = XEXP (DECL_RTL (function), 0);
18830 tmp = (gen_rtx_SYMBOL_REF
18832 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
18833 tmp = gen_rtx_MEM (QImode, tmp);
18835 output_asm_insn ("jmp\t%0", xops);
18838 #endif /* TARGET_MACHO */
18840 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
18841 output_set_got (tmp, NULL_RTX);
18844 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
18845 output_asm_insn ("jmp\t{*}%1", xops);
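/* For illustration, a 32-bit non-PIC thunk with DELTA == -4 and no
   vcall offset reduces to roughly ("real_function" is an example
   name):

	addl	$-4, 4(%esp)
	jmp	real_function

   i.e. adjust "this" in its stack slot and tail-jump to the real
   function.  */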
18851 x86_file_start (void)
18853 default_file_start ();
18855 darwin_file_start ();
18857 if (X86_FILE_START_VERSION_DIRECTIVE)
18858 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
18859 if (X86_FILE_START_FLTUSED)
18860 fputs ("\t.global\t__fltused\n", asm_out_file);
18861 if (ix86_asm_dialect == ASM_INTEL)
18862 fputs ("\t.intel_syntax\n", asm_out_file);
18866 x86_field_alignment (tree field, int computed)
18868 enum machine_mode mode;
18869 tree type = TREE_TYPE (field);
18871 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
18873 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
18874 ? get_inner_array_type (type) : type);
18875 if (mode == DFmode || mode == DCmode
18876 || GET_MODE_CLASS (mode) == MODE_INT
18877 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
18878 return MIN (32, computed);
18882 /* Output assembler code to FILE to increment profiler label # LABELNO
18883 for profiling a function entry. */
18885 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
18890 #ifndef NO_PROFILE_COUNTERS
18891 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
18893 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
18897 #ifndef NO_PROFILE_COUNTERS
18898 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
18900 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18904 #ifndef NO_PROFILE_COUNTERS
18905 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
18906 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
18908 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
18912 #ifndef NO_PROFILE_COUNTERS
18913 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
18914 PROFILE_COUNT_REGISTER);
18916 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18920 /* We don't have exact information about the insn sizes, but we may assume
18921 quite safely that we are informed about all 1 byte insns and memory
18922 address sizes. This is enough to eliminate unnecessary padding in
18926 min_insn_size (rtx insn)
18930 if (!INSN_P (insn) || !active_insn_p (insn))
18933 /* Discard alignments we've emitted, and jump instructions.  */
18934 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
18935 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
18937 if (GET_CODE (insn) == JUMP_INSN
18938 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
18939 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
18942 /* Important case - calls are always 5 bytes.
18943 It is common to have many calls in a row.  */
18944 if (GET_CODE (insn) == CALL_INSN
18945 && symbolic_reference_mentioned_p (PATTERN (insn))
18946 && !SIBLING_CALL_P (insn))
18948 if (get_attr_length (insn) <= 1)
18951 /* For normal instructions we may rely on the sizes of addresses
18952 and the presence of symbol to require 4 bytes of encoding.
18953 This is not the case for jumps where references are PC relative. */
18954 if (GET_CODE (insn) != JUMP_INSN)
18956 l = get_attr_length_address (insn);
18957 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
18966 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a
18967 16-byte window.  */
18970 ix86_avoid_jump_misspredicts (void)
18972 rtx insn, start = get_insns ();
18973 int nbytes = 0, njumps = 0;
18976 /* Look for all minimal intervals of instructions containing 4 jumps.
18977 The intervals are bounded by START and INSN.  NBYTES is the total
18978 size of instructions in the interval including INSN and not including
18979 START.  When NBYTES is smaller than 16, it is possible
18980 that the ends of START and INSN fall in the same 16-byte page.
18982 The smallest offset in the page at which INSN can start is the case
18983 where START ends at offset 0.  The offset of INSN is then NBYTES - sizeof (INSN).
18984 We add p2align to a 16-byte window with maxskip 17 - NBYTES + sizeof (INSN).
18986 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18989 nbytes += min_insn_size (insn);
18991 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
18992 INSN_UID (insn), min_insn_size (insn));
18993 if ((GET_CODE (insn) == JUMP_INSN
18994 && GET_CODE (PATTERN (insn)) != ADDR_VEC
18995 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
18996 || GET_CODE (insn) == CALL_INSN)
19003 start = NEXT_INSN (start);
19004 if ((GET_CODE (start) == JUMP_INSN
19005 && GET_CODE (PATTERN (start)) != ADDR_VEC
19006 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
19007 || GET_CODE (start) == CALL_INSN)
19008 njumps--, isjump = 1;
19011 nbytes -= min_insn_size (start);
19013 gcc_assert (njumps >= 0);
19015 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
19016 INSN_UID (start), INSN_UID (insn), nbytes);
19018 if (njumps == 3 && isjump && nbytes < 16)
19020 int padsize = 15 - nbytes + min_insn_size (insn);
19023 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
19024 INSN_UID (insn), padsize);
19025 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
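/* Illustrative scenario: four short jumps packed into fewer than 16
   bytes of code could all land in one 16-byte fetch window; the
   p2align emitted above pushes the fourth jump into the next window,
   so any window holds at most three.  */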
19030 /* AMD Athlon works faster
19031 when RET is not the destination of a conditional jump or directly preceded
19032 by another jump instruction.  We avoid the penalty by inserting a NOP just
19033 before the RET instructions in such cases.  */
19035 ix86_pad_returns (void)
19040 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
19042 basic_block bb = e->src;
19043 rtx ret = BB_END (bb);
19045 bool replace = false;
19047 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
19048 || !maybe_hot_bb_p (bb))
19050 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
19051 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
19053 if (prev && GET_CODE (prev) == CODE_LABEL)
19058 FOR_EACH_EDGE (e, ei, bb->preds)
19059 if (EDGE_FREQUENCY (e) && e->src->index >= 0
19060 && !(e->flags & EDGE_FALLTHRU))
19065 prev = prev_active_insn (ret);
19067 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
19068 || GET_CODE (prev) == CALL_INSN))
19070 /* Empty functions get a branch mispredict even when the jump destination
19071 is not visible to us. */
19072 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
19077 emit_insn_before (gen_return_internal_long (), ret);
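/* Illustrative effect: a "ret" directly following a conditional
   jump, e.g.

	jne	.L2
	ret

   is replaced by the longer return_internal_long form (a "rep ret"
   style encoding), which sidesteps the predictor penalty for a
   one-byte return in that position.  */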
19083 /* Implement machine specific optimizations. We implement padding of returns
19084 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
19088 if (TARGET_PAD_RETURNS && optimize && !optimize_size)
19089 ix86_pad_returns ();
19090 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
19091 ix86_avoid_jump_misspredicts ();
19094 /* Return nonzero when a QImode register that must be represented via a
19095 REX prefix is used.  */
19097 x86_extended_QIreg_mentioned_p (rtx insn)
19100 extract_insn_cached (insn);
19101 for (i = 0; i < recog_data.n_operands; i++)
19102 if (REG_P (recog_data.operand[i])
19103 && REGNO (recog_data.operand[i]) >= 4)
19108 /* Return nonzero when P points to a register encoded via a REX prefix.
19109 Called via for_each_rtx. */
19111 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
19113 unsigned int regno;
19116 regno = REGNO (*p);
19117 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
19120 /* Return true when INSN mentions register that must be encoded using REX
19123 x86_extended_reg_mentioned_p (rtx insn)
19125 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
19128 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
19129 optabs would emit if we didn't have TFmode patterns. */
19132 x86_emit_floatuns (rtx operands[2])
19134 rtx neglab, donelab, i0, i1, f0, in, out;
19135 enum machine_mode mode, inmode;
19137 inmode = GET_MODE (operands[1]);
19138 gcc_assert (inmode == SImode || inmode == DImode);
19141 in = force_reg (inmode, operands[1]);
19142 mode = GET_MODE (out);
19143 neglab = gen_label_rtx ();
19144 donelab = gen_label_rtx ();
19145 i1 = gen_reg_rtx (Pmode);
19146 f0 = gen_reg_rtx (mode);
19148 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
19150 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
19151 emit_jump_insn (gen_jump (donelab));
19154 emit_label (neglab);
19156 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19157 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19158 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
19159 expand_float (f0, i0, 0);
19160 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
19162 emit_label (donelab);
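/* C sketch of the expansion above (illustrative):

	if ((signed) in >= 0)
	  out = (FLOAT) in;
	else
	  {
	    i0 = (in >> 1) | (in & 1);
	    out = (FLOAT) i0;
	    out = out + out;
	  }

   Halving with the low bit folded back in keeps the value positive
   for the signed conversion while preserving the rounding of the
   final doubling.  */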
19165 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19166 with all elements equal to VAR. Return true if successful. */
19169 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
19170 rtx target, rtx val)
19172 enum machine_mode smode, wsmode, wvmode;
19187 val = force_reg (GET_MODE_INNER (mode), val);
19188 x = gen_rtx_VEC_DUPLICATE (mode, val);
19189 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19195 if (TARGET_SSE || TARGET_3DNOW_A)
19197 val = gen_lowpart (SImode, val);
19198 x = gen_rtx_TRUNCATE (HImode, val);
19199 x = gen_rtx_VEC_DUPLICATE (mode, x);
19200 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19222 /* Extend HImode to SImode using a paradoxical SUBREG. */
19223 tmp1 = gen_reg_rtx (SImode);
19224 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19225 /* Insert the SImode value as low element of V4SImode vector. */
19226 tmp2 = gen_reg_rtx (V4SImode);
19227 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19228 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19229 CONST0_RTX (V4SImode),
19231 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19232 /* Cast the V4SImode vector back to a V8HImode vector. */
19233 tmp1 = gen_reg_rtx (V8HImode);
19234 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
19235 /* Duplicate the low short through the whole low SImode word. */
19236 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
19237 /* Cast the V8HImode vector back to a V4SImode vector. */
19238 tmp2 = gen_reg_rtx (V4SImode);
19239 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19240 /* Replicate the low element of the V4SImode vector. */
19241 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19242 /* Cast the V4SImode vector back to V8HImode, and store in target.  */
19243 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
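/* The V8HImode broadcast above assembles to roughly (illustrative;
   register choice is an example):

	movd	%eax, %xmm0		# SImode value into vector reg
	punpcklwd %xmm0, %xmm0		# replicate short in low dword
	pshufd	$0, %xmm0, %xmm0	# replicate dword to all lanes
   */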
19254 /* Extend QImode to SImode using a paradoxical SUBREG. */
19255 tmp1 = gen_reg_rtx (SImode);
19256 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19257 /* Insert the SImode value as low element of V4SImode vector. */
19258 tmp2 = gen_reg_rtx (V4SImode);
19259 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19260 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19261 CONST0_RTX (V4SImode),
19263 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19264 /* Cast the V4SImode vector back to a V16QImode vector. */
19265 tmp1 = gen_reg_rtx (V16QImode);
19266 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
19267 /* Duplicate the low byte through the whole low SImode word. */
19268 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19269 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19270 /* Cast the V16QImode vector back to a V4SImode vector. */
19271 tmp2 = gen_reg_rtx (V4SImode);
19272 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19273 /* Replicate the low element of the V4SImode vector. */
19274 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19275 /* Cast the V4SImode vector back to V16QImode, and store in target.  */
19276 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
19284 /* Replicate the value once into the next wider mode and recurse. */
19285 val = convert_modes (wsmode, smode, val, true);
19286 x = expand_simple_binop (wsmode, ASHIFT, val,
19287 GEN_INT (GET_MODE_BITSIZE (smode)),
19288 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19289 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
19291 x = gen_reg_rtx (wvmode);
19292 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
19293 gcc_unreachable ();
19294 emit_move_insn (target, gen_lowpart (mode, x));
19302 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19303 whose ONE_VAR element is VAR, and other elements are zero. Return true
19307 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
19308 rtx target, rtx var, int one_var)
19310 enum machine_mode vsimode;
19326 var = force_reg (GET_MODE_INNER (mode), var);
19327 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
19328 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19333 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
19334 new_target = gen_reg_rtx (mode);
19336 new_target = target;
19337 var = force_reg (GET_MODE_INNER (mode), var);
19338 x = gen_rtx_VEC_DUPLICATE (mode, var);
19339 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
19340 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
19343 /* We need to shuffle the value to the correct position, so
19344 create a new pseudo to store the intermediate result. */
19346 /* With SSE2, we can use the integer shuffle insns. */
19347 if (mode != V4SFmode && TARGET_SSE2)
19349 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
19351 GEN_INT (one_var == 1 ? 0 : 1),
19352 GEN_INT (one_var == 2 ? 0 : 1),
19353 GEN_INT (one_var == 3 ? 0 : 1)));
19354 if (target != new_target)
19355 emit_move_insn (target, new_target);
19359 /* Otherwise convert the intermediate result to V4SFmode and
19360 use the SSE1 shuffle instructions. */
19361 if (mode != V4SFmode)
19363 tmp = gen_reg_rtx (V4SFmode);
19364 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
19369 emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
19371 GEN_INT (one_var == 1 ? 0 : 1),
19372 GEN_INT (one_var == 2 ? 0+4 : 1+4),
19373 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
19375 if (mode != V4SFmode)
19376 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
19377 else if (tmp != target)
19378 emit_move_insn (target, tmp);
19380 else if (target != new_target)
19381 emit_move_insn (target, new_target);
19386 vsimode = V4SImode;
19392 vsimode = V2SImode;
19398 /* Zero extend the variable element to SImode and recurse. */
19399 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
19401 x = gen_reg_rtx (vsimode);
19402 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
19404 gcc_unreachable ();
19406 emit_move_insn (target, gen_lowpart (mode, x));
19414 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19415 consisting of the values in VALS. It is known that all elements
19416 except ONE_VAR are constants. Return true if successful. */
19419 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
19420 rtx target, rtx vals, int one_var)
19422 rtx var = XVECEXP (vals, 0, one_var);
19423 enum machine_mode wmode;
19426 const_vec = copy_rtx (vals);
19427 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
19428 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
19436 /* For the two element vectors, it's just as easy to use
19437 the general case. */
19453 /* There's no way to set one QImode entry easily. Combine
19454 the variable value with its adjacent constant value, and
19455 promote to an HImode set. */
19456 x = XVECEXP (vals, 0, one_var ^ 1);
19459 var = convert_modes (HImode, QImode, var, true);
19460 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
19461 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19462 x = GEN_INT (INTVAL (x) & 0xff);
19466 var = convert_modes (HImode, QImode, var, true);
19467 x = gen_int_mode (INTVAL (x) << 8, HImode);
19469 if (x != const0_rtx)
19470 var = expand_simple_binop (HImode, IOR, var, x, var,
19471 1, OPTAB_LIB_WIDEN);
19473 x = gen_reg_rtx (wmode);
19474 emit_move_insn (x, gen_lowpart (wmode, const_vec));
19475 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
19477 emit_move_insn (target, gen_lowpart (mode, x));
19484 emit_move_insn (target, const_vec);
19485 ix86_expand_vector_set (mmx_ok, target, var, one_var);
19489 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
19490 all values variable, and none identical. */
19493 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
19494 rtx target, rtx vals)
19496 enum machine_mode half_mode = GET_MODE_INNER (mode);
19497 rtx op0 = NULL, op1 = NULL;
19498 bool use_vec_concat = false;
19504 if (!mmx_ok && !TARGET_SSE)
19510 /* For the two element vectors, we always implement VEC_CONCAT. */
19511 op0 = XVECEXP (vals, 0, 0);
19512 op1 = XVECEXP (vals, 0, 1);
19513 use_vec_concat = true;
19517 half_mode = V2SFmode;
19520 half_mode = V2SImode;
19526 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
19527 Recurse to load the two halves. */
19529 op0 = gen_reg_rtx (half_mode);
19530 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
19531 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
19533 op1 = gen_reg_rtx (half_mode);
19534 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
19535 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
19537 use_vec_concat = true;
19548 gcc_unreachable ();
19551 if (use_vec_concat)
19553 if (!register_operand (op0, half_mode))
19554 op0 = force_reg (half_mode, op0);
19555 if (!register_operand (op1, half_mode))
19556 op1 = force_reg (half_mode, op1);
19558 emit_insn (gen_rtx_SET (VOIDmode, target,
19559 gen_rtx_VEC_CONCAT (mode, op0, op1)));
19563 int i, j, n_elts, n_words, n_elt_per_word;
19564 enum machine_mode inner_mode;
19565 rtx words[4], shift;
19567 inner_mode = GET_MODE_INNER (mode);
19568 n_elts = GET_MODE_NUNITS (mode);
19569 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
19570 n_elt_per_word = n_elts / n_words;
19571 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
19573 for (i = 0; i < n_words; ++i)
19575 rtx word = NULL_RTX;
19577 for (j = 0; j < n_elt_per_word; ++j)
19579 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
19580 elt = convert_modes (word_mode, inner_mode, elt, true);
19586 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
19587 word, 1, OPTAB_LIB_WIDEN);
19588 word = expand_simple_binop (word_mode, IOR, word, elt,
19589 word, 1, OPTAB_LIB_WIDEN);
19597 emit_move_insn (target, gen_lowpart (mode, words[0]));
19598 else if (n_words == 2)
19600 rtx tmp = gen_reg_rtx (mode);
19601 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
19602 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
19603 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
19604 emit_move_insn (target, tmp);
19606 else if (n_words == 4)
19608 rtx tmp = gen_reg_rtx (V4SImode);
19609 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
19610 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
19611 emit_move_insn (target, gen_lowpart (mode, tmp));
19614 gcc_unreachable ();
19618 /* Initialize vector TARGET via VALS. Suppress the use of MMX
19619 instructions unless MMX_OK is true. */
19622 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
19624 enum machine_mode mode = GET_MODE (target);
19625 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19626 int n_elts = GET_MODE_NUNITS (mode);
19627 int n_var = 0, one_var = -1;
19628 bool all_same = true, all_const_zero = true;
19632 for (i = 0; i < n_elts; ++i)
19634 x = XVECEXP (vals, 0, i);
19635 if (!CONSTANT_P (x))
19636 n_var++, one_var = i;
19637 else if (x != CONST0_RTX (inner_mode))
19638 all_const_zero = false;
19639 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
19643 /* Constants are best loaded from the constant pool. */
19646 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
19650 /* If all values are identical, broadcast the value. */
19652 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
19653 XVECEXP (vals, 0, 0)))
19656 /* Values where only one field is non-constant are best loaded from
19657 the pool and overwritten via move later. */
19661 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
19662 XVECEXP (vals, 0, one_var),
19666 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
19670 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
19674 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
19676 enum machine_mode mode = GET_MODE (target);
19677 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19678 bool use_vec_merge = false;
19687 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
19688 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
19690 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
19692 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
19693 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19703 /* For the two element vectors, we implement a VEC_CONCAT with
19704 the extraction of the other element. */
19706 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
19707 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
19710 op0 = val, op1 = tmp;
19712 op0 = tmp, op1 = val;
19714 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
19715 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19723 use_vec_merge = true;
19727 /* tmp = target = A B C D */
19728 tmp = copy_to_reg (target);
19729 /* target = A A B B */
19730 emit_insn (gen_sse_unpcklps (target, target, target));
19731 /* target = X A B B */
19732 ix86_expand_vector_set (false, target, val, 0);
19733 /* target = A X C D */
19734 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19735 GEN_INT (1), GEN_INT (0),
19736 GEN_INT (2+4), GEN_INT (3+4)));
19740 /* tmp = target = A B C D */
19741 tmp = copy_to_reg (target);
19742 /* tmp = X B C D */
19743 ix86_expand_vector_set (false, tmp, val, 0);
19744 /* target = A B X D */
19745 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19746 GEN_INT (0), GEN_INT (1),
19747 GEN_INT (0+4), GEN_INT (3+4)));
19751 /* tmp = target = A B C D */
19752 tmp = copy_to_reg (target);
19753 /* tmp = X B C D */
19754 ix86_expand_vector_set (false, tmp, val, 0);
19755 /* target = A B C X */
19756 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19757 GEN_INT (0), GEN_INT (1),
19758 GEN_INT (2+4), GEN_INT (0+4)));
19762 gcc_unreachable ();
19767 /* Element 0 handled by vec_merge below. */
19770 use_vec_merge = true;
19776 /* With SSE2, use integer shuffles to swap element 0 and ELT,
19777 store into element 0, then shuffle them back. */
19781 order[0] = GEN_INT (elt);
19782 order[1] = const1_rtx;
19783 order[2] = const2_rtx;
19784 order[3] = GEN_INT (3);
19785 order[elt] = const0_rtx;
19787 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19788 order[1], order[2], order[3]));
19790 ix86_expand_vector_set (false, target, val, 0);
19792 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19793 order[1], order[2], order[3]));
19797 /* For SSE1, we have to reuse the V4SF code. */
19798 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
19799 gen_lowpart (SFmode, val), elt);
19804 use_vec_merge = TARGET_SSE2;
19807 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19818 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
19819 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
19820 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19824 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19826 emit_move_insn (mem, target);
19828 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19829 emit_move_insn (tmp, val);
19831 emit_move_insn (target, mem);
19836 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
19838 enum machine_mode mode = GET_MODE (vec);
19839 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19840 bool use_vec_extr = false;
19853 use_vec_extr = true;
19865 tmp = gen_reg_rtx (mode);
19866 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
19867 GEN_INT (elt), GEN_INT (elt),
19868 GEN_INT (elt+4), GEN_INT (elt+4)));
19872 tmp = gen_reg_rtx (mode);
19873 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
19877 gcc_unreachable ();
19880 use_vec_extr = true;
19895 tmp = gen_reg_rtx (mode);
19896 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
19897 GEN_INT (elt), GEN_INT (elt),
19898 GEN_INT (elt), GEN_INT (elt)));
19902 tmp = gen_reg_rtx (mode);
19903 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
19907 gcc_unreachable ();
19910 use_vec_extr = true;
19915 /* For SSE1, we have to reuse the V4SF code. */
19916 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
19917 gen_lowpart (V4SFmode, vec), elt);
19923 use_vec_extr = TARGET_SSE2;
19926 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19931 /* ??? Could extract the appropriate HImode element and shift. */
19938 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
19939 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
19941 /* Let the rtl optimizers know about the zero extension performed. */
19942 if (inner_mode == HImode)
19944 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
19945 target = gen_lowpart (SImode, target);
19948 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19952 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19954 emit_move_insn (mem, vec);
19956 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19957 emit_move_insn (target, tmp);
19961 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
19962 pattern to reduce; DEST is the destination; IN is the input vector. */
19965 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
19967 rtx tmp1, tmp2, tmp3;
19969 tmp1 = gen_reg_rtx (V4SFmode);
19970 tmp2 = gen_reg_rtx (V4SFmode);
19971 tmp3 = gen_reg_rtx (V4SFmode);
19973 emit_insn (gen_sse_movhlps (tmp1, in, in));
19974 emit_insn (fn (tmp2, tmp1, in));
19976 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
19977 GEN_INT (1), GEN_INT (1),
19978 GEN_INT (1+4), GEN_INT (1+4)));
19979 emit_insn (fn (dest, tmp2, tmp3));
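/* Data flow of the reduction above (illustrative), for in = {a,b,c,d}:

	tmp1 = movhlps (in, in)        -> { c, d, c, d }
	tmp2 = fn (tmp1, in)           -> { fn(c,a), fn(d,b), ... }
	tmp3 = broadcast of tmp2[1]
	dest = fn (tmp2, tmp3)         -> element 0 = fn(fn(c,a), fn(d,b)).  */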
19982 /* Target hook for scalar_mode_supported_p. */
19984 ix86_scalar_mode_supported_p (enum machine_mode mode)
19986 if (DECIMAL_FLOAT_MODE_P (mode))
19989 return default_scalar_mode_supported_p (mode);
19992 /* Implements target hook vector_mode_supported_p. */
19994 ix86_vector_mode_supported_p (enum machine_mode mode)
19996 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
19998 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
20000 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
20002 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
20007 /* Worker function for TARGET_MD_ASM_CLOBBERS.
20009 We do this in the new i386 backend to maintain source compatibility
20010 with the old cc0-based compiler. */
20013 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
20014 tree inputs ATTRIBUTE_UNUSED,
20017 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
20019 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
20024 /* Return true if this goes in large data/bss.  */
20027 ix86_in_large_data_p (tree exp)
20029 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
20032 /* Functions are never large data. */
20033 if (TREE_CODE (exp) == FUNCTION_DECL)
20036 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
20038 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
20039 if (strcmp (section, ".ldata") == 0
20040 || strcmp (section, ".lbss") == 0)
20046 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
20048 /* If this is an incomplete type with size 0, then we can't put it
20049 in data because it might be too big when completed. */
20050 if (!size || size > ix86_section_threshold)
20057 ix86_encode_section_info (tree decl, rtx rtl, int first)
20059 default_encode_section_info (decl, rtl, first);
20061 if (TREE_CODE (decl) == VAR_DECL
20062 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
20063 && ix86_in_large_data_p (decl))
20064 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
20067 /* Worker function for REVERSE_CONDITION. */
20070 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
20072 return (mode != CCFPmode && mode != CCFPUmode
20073 ? reverse_condition (code)
20074 : reverse_condition_maybe_unordered (code));
20077 /* Output code to perform an x87 FP register move, from OPERANDS[1]
20078 to OPERANDS[0].  */
20081 output_387_reg_move (rtx insn, rtx *operands)
20083 if (REG_P (operands[1])
20084 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
20086 if (REGNO (operands[0]) == FIRST_STACK_REG)
20087 return output_387_ffreep (operands, 0);
20088 return "fstp\t%y0";
20090 if (STACK_TOP_P (operands[0]))
20091 return "fld%z1\t%y1";
20095 /* Output code to perform a conditional jump to LABEL, if C2 flag in
20096 FP status register is set. */
20099 ix86_emit_fp_unordered_jump (rtx label)
20101 rtx reg = gen_reg_rtx (HImode);
20104 emit_insn (gen_x86_fnstsw_1 (reg));
20106 if (TARGET_USE_SAHF)
20108 emit_insn (gen_x86_sahf_1 (reg));
20110 temp = gen_rtx_REG (CCmode, FLAGS_REG);
20111 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
20115 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
20117 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
20118 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
20121 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
20122 gen_rtx_LABEL_REF (VOIDmode, label),
20124 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
20125 emit_jump_insn (temp);
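/* Illustrative output: with TARGET_USE_SAHF the sequence is roughly

	fnstsw	%ax
	sahf
	jp	label		# C2 ends up in PF after sahf

   and otherwise the C2 bit is tested directly in the high status
   byte ("testb $4, %ah" style) followed by jne.  */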
20128 /* Output code to perform a log1p XFmode calculation. */
20130 void ix86_emit_i387_log1p (rtx op0, rtx op1)
20132 rtx label1 = gen_label_rtx ();
20133 rtx label2 = gen_label_rtx ();
20135 rtx tmp = gen_reg_rtx (XFmode);
20136 rtx tmp2 = gen_reg_rtx (XFmode);
20138 emit_insn (gen_absxf2 (tmp, op1));
20139 emit_insn (gen_cmpxf (tmp,
20140 CONST_DOUBLE_FROM_REAL_VALUE (
20141 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
20143 emit_jump_insn (gen_bge (label1));
20145 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20146 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
20147 emit_jump (label2);
20149 emit_label (label1);
20150 emit_move_insn (tmp, CONST1_RTX (XFmode));
20151 emit_insn (gen_addxf3 (tmp, op1, tmp));
20152 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20153 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
20155 emit_label (label2);
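/* Rationale (illustrative note): fyl2xp1 computes y * log2 (1 + x)
   but is only specified for |x| < 1 - sqrt (2) / 2 ~= 0.2928..., which
   is the constant tested above.  With y = ln (2) loaded by fldln2 the
   result is log1p (x) directly; outside that range the code adds 1
   and falls back to fyl2x, i.e. ln (2) * log2 (1 + x).  */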
20158 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
20161 i386_solaris_elf_named_section (const char *name, unsigned int flags,
20164 /* With Binutils 2.15, the "@unwind" marker must be specified on
20165 every occurrence of the ".eh_frame" section, not just the first
20166 one.  */
20167 if (TARGET_64BIT
20168 && strcmp (name, ".eh_frame") == 0)
20170 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
20171 flags & SECTION_WRITE ? "aw" : "a");
20174 default_elf_asm_named_section (name, flags, decl);
20177 /* Return the mangling of TYPE if it is an extended fundamental type. */
20179 static const char *
20180 ix86_mangle_fundamental_type (tree type)
20182 switch (TYPE_MODE (type))
20185 /* __float128 is "g". */
20188 /* "long double" or __float80 is "e". */
20195 /* For 32-bit code we can save PIC register setup by using
20196 __stack_chk_fail_local hidden function instead of calling
20197 __stack_chk_fail directly. 64-bit code doesn't need to setup any PIC
20198 register, so it is better to call __stack_chk_fail directly. */
20201 ix86_stack_protect_fail (void)
20203 return TARGET_64BIT
20204 ? default_external_stack_protect_fail ()
20205 : default_hidden_stack_protect_fail ();
20208 /* Select a format to encode pointers in exception handling data. CODE
20209 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
20210 true if the symbol may be affected by dynamic relocations.
20212 ??? All x86 object file formats are capable of representing this.
20213 After all, the relocation needed is the same as for the call insn.
20214 Whether or not a particular assembler allows us to enter such, I
20215 guess we'll have to see. */
20217 asm_preferred_eh_data_format (int code, int global)
20221 int type = DW_EH_PE_sdata8;
20223 || ix86_cmodel == CM_SMALL_PIC
20224 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
20225 type = DW_EH_PE_sdata4;
20226 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
20228 if (ix86_cmodel == CM_SMALL
20229 || (ix86_cmodel == CM_MEDIUM && code))
20230 return DW_EH_PE_udata4;
20231 return DW_EH_PE_absptr;
20234 /* Expand copysign from SIGN to the positive value ABS_VALUE
20235 storing in RESULT.  If MASK is non-null, it shall be a mask to mask out
20236 the sign bit.  */
20238 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
20240 enum machine_mode mode = GET_MODE (sign);
20241 rtx sgn = gen_reg_rtx (mode);
20242 if (mask == NULL_RTX)
20244 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
20245 if (!VECTOR_MODE_P (mode))
20247 /* We need to generate a scalar mode mask in this case. */
20248 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20249 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20250 mask = gen_reg_rtx (mode);
20251 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20254 else
20255 mask = gen_rtx_NOT (mode, mask);
20256 emit_insn (gen_rtx_SET (VOIDmode, sgn,
20257 gen_rtx_AND (mode, mask, sign)));
20258 emit_insn (gen_rtx_SET (VOIDmode, result,
20259 gen_rtx_IOR (mode, abs_value, sgn)));
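/* Scalar sketch (illustrative), with SIGNBIT standing for the
   sign-bit-only mask in effect at the AND above:

	sgn    = sign & SIGNBIT;
	result = abs_value | sgn;

   ABS_VALUE is assumed to have a clear sign bit already, so the OR
   installs the sign of SIGN without changing the magnitude.  */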
20262 /* Expand fabs (OP0) and return a new rtx that holds the result. The
20263 mask for masking out the sign bit is stored in *SMASK, if that is
20264 non-null.  */
20266 ix86_expand_sse_fabs (rtx op0, rtx *smask)
20268 enum machine_mode mode = GET_MODE (op0);
20271 xa = gen_reg_rtx (mode);
20272 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
20273 if (!VECTOR_MODE_P (mode))
20275 /* We need to generate a scalar mode mask in this case. */
20276 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20277 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20278 mask = gen_reg_rtx (mode);
20279 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20281 emit_insn (gen_rtx_SET (VOIDmode, xa,
20282 gen_rtx_AND (mode, op0, mask)));
20290 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
20291 swapping the operands if SWAP_OPERANDS is true. The expanded
20292 code is a forward jump to a newly created label in case the
20293 comparison is true. The generated label rtx is returned. */
20295 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
20296 bool swap_operands)
20307 label = gen_label_rtx ();
20308 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
20309 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20310 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
20311 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
20312 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
20313 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
20314 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
20315 JUMP_LABEL (tmp) = label;
20320 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
20321 using comparison code CODE. Operands are swapped for the comparison if
20322 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
20324 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
20325 bool swap_operands)
20327 enum machine_mode mode = GET_MODE (op0);
20328 rtx mask = gen_reg_rtx (mode);
20337 if (mode == DFmode)
20338 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
20339 gen_rtx_fmt_ee (code, mode, op0, op1)));
20341 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
20342 gen_rtx_fmt_ee (code, mode, op0, op1)));
20347 /* Generate and return a rtx of mode MODE for 2**n where n is the number
20348 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
20350 ix86_gen_TWO52 (enum machine_mode mode)
20352 REAL_VALUE_TYPE TWO52r;
20355 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
20356 TWO52 = const_double_from_real_value (TWO52r, mode);
20357 TWO52 = force_reg (mode, TWO52);
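/* Worked example (illustrative, DFmode, round-to-nearest): 2**52 is
   4503599627370496.0; for x == 3.7, x + 2**52 rounds to
   4503599627370500.0, and subtracting 2**52 again leaves 4.0.  Values
   with |x| >= 2**52 are already integral, hence the isless (xa, TWO52)
   guards at the call sites.  */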
20362 /* Expand SSE sequence for computing lround from OP1 storing
20363 into OP0.  */
20365 ix86_expand_lround (rtx op0, rtx op1)
20367 /* C code for the stuff we're doing below:
20368 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
20369 return (long)tmp;  */
20371 enum machine_mode mode = GET_MODE (op1);
20372 const struct real_format *fmt;
20373 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20376 /* load nextafter (0.5, 0.0) */
20377 fmt = REAL_MODE_FORMAT (mode);
20378 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20379 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20381 /* adj = copysign (0.5, op1) */
20382 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
20383 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
20385 /* adj = op1 + adj */
20386 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
20388 /* op0 = (imode)adj */
20389 expand_fix (op0, adj, 0);
20392 /* Expand SSE2 sequence for computing lfloor or lceil from OP1 storing
20393 into OP0.  */
20395 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
20397 /* C code for the stuff we're doing below (for do_floor):
20398 xi = (long)op1;
20399 xi -= (double)xi > op1 ? 1 : 0;
20402 enum machine_mode fmode = GET_MODE (op1);
20403 enum machine_mode imode = GET_MODE (op0);
20404 rtx ireg, freg, label, tmp;
20406 /* reg = (long)op1 */
20407 ireg = gen_reg_rtx (imode);
20408 expand_fix (ireg, op1, 0);
20410 /* freg = (double)reg */
20411 freg = gen_reg_rtx (fmode);
20412 expand_float (freg, ireg, 0);
20414 /* ireg = (freg > op1) ? ireg - 1 : ireg */
20415 label = ix86_expand_sse_compare_and_jump (UNLE,
20416 freg, op1, !do_floor);
20417 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
20418 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
20419 emit_move_insn (ireg, tmp);
20421 emit_label (label);
20422 LABEL_NUSES (label) = 1;
20424 emit_move_insn (op0, ireg);
20427 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
20428 result in OPERAND0. */
20430 ix86_expand_rint (rtx operand0, rtx operand1)
20432 /* C code for the stuff we're doing below:
20433 xa = fabs (operand1);
20434 if (!isless (xa, 2**52))
20435 return operand1;
20436 xa = xa + 2**52 - 2**52;
20437 return copysign (xa, operand1);
20439 enum machine_mode mode = GET_MODE (operand0);
20440 rtx res, xa, label, TWO52, mask;
20442 res = gen_reg_rtx (mode);
20443 emit_move_insn (res, operand1);
20445 /* xa = abs (operand1) */
20446 xa = ix86_expand_sse_fabs (res, &mask);
20448 /* if (!isless (xa, TWO52)) goto label; */
20449 TWO52 = ix86_gen_TWO52 (mode);
20450 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20452 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20453 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20455 ix86_sse_copysign_to_positive (res, xa, res, mask);
20457 emit_label (label);
20458 LABEL_NUSES (label) = 1;
20460 emit_move_insn (operand0, res);
20463 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20464 into OPERAND0.  */
20466 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
20468 /* C code for the stuff we expand below.
20469 double xa = fabs (x), x2;
20470 if (!isless (xa, TWO52))
20472 xa = xa + TWO52 - TWO52;
20473 x2 = copysign (xa, x);
20482 enum machine_mode mode = GET_MODE (operand0);
20483 rtx xa, TWO52, tmp, label, one, res, mask;
20485 TWO52 = ix86_gen_TWO52 (mode);
20487 /* Temporary for holding the result, initialized to the input
20488 operand to ease control flow. */
20489 res = gen_reg_rtx (mode);
20490 emit_move_insn (res, operand1);
20492 /* xa = abs (operand1) */
20493 xa = ix86_expand_sse_fabs (res, &mask);
20495 /* if (!isless (xa, TWO52)) goto label; */
20496 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20498 /* xa = xa + TWO52 - TWO52; */
20499 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20500 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20502 /* xa = copysign (xa, operand1) */
20503 ix86_sse_copysign_to_positive (xa, xa, res, mask);
20505 /* generate 1.0 or -1.0 */
20506 one = force_reg (mode,
20507 const_double_from_real_value (do_floor
20508 ? dconst1 : dconstm1, mode));
20510 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20511 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20512 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20513 gen_rtx_AND (mode, one, tmp)));
20514 /* We always need to subtract here to preserve signed zero. */
20515 tmp = expand_simple_binop (mode, MINUS,
20516 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20517 emit_move_insn (res, tmp);
20519 emit_label (label);
20520 LABEL_NUSES (label) = 1;
20522 emit_move_insn (operand0, res);
20525 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20526 into OPERAND0.  */
20528 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
20530 /* C code for the stuff we expand below.
20531 double xa = fabs (x), x2;
20532 if (!isless (xa, TWO52))
20534 x2 = (double)(long)x;
20541 if (HONOR_SIGNED_ZEROS (mode))
20542 return copysign (x2, x);
20545 enum machine_mode mode = GET_MODE (operand0);
20546 rtx xa, xi, TWO52, tmp, label, one, res, mask;
20548 TWO52 = ix86_gen_TWO52 (mode);
20550 /* Temporary for holding the result, initialized to the input
20551 operand to ease control flow. */
20552 res = gen_reg_rtx (mode);
20553 emit_move_insn (res, operand1);
20555 /* xa = abs (operand1) */
20556 xa = ix86_expand_sse_fabs (res, &mask);
20558 /* if (!isless (xa, TWO52)) goto label; */
20559 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20561 /* xa = (double)(long)x */
20562 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20563 expand_fix (xi, res, 0);
20564 expand_float (xa, xi, 0);
20567 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20569 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20570 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20571 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20572 gen_rtx_AND (mode, one, tmp)));
20573 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
20574 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20575 emit_move_insn (res, tmp);
20577 if (HONOR_SIGNED_ZEROS (mode))
20578 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20580 emit_label (label);
20581 LABEL_NUSES (label) = 1;
20583 emit_move_insn (operand0, res);
20586 /* Expand SSE sequence for computing round from OPERAND1 storing
20587 into OPERAND0. Sequence that works without relying on DImode truncation
20588 via cvttsd2siq that is only available on 64bit targets. */
20590 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
20592 /* C code for the stuff we expand below.
20593 double xa = fabs (x), xa2, x2;
20594 if (!isless (xa, TWO52))
20596 Using the absolute value and copying back sign makes
20597 -0.0 -> -0.0 correct.
20598 xa2 = xa + TWO52 - TWO52;
20603 else if (dxa > 0.5)
20605 x2 = copysign (xa2, x);
20608 enum machine_mode mode = GET_MODE (operand0);
20609 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
20611 TWO52 = ix86_gen_TWO52 (mode);
20613 /* Temporary for holding the result, initialized to the input
20614 operand to ease control flow. */
20615 res = gen_reg_rtx (mode);
20616 emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* xa2 = xa + TWO52 - TWO52; */
  xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
  xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);

  /* dxa = xa2 - xa; */
  dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
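  /* dxa is the error of rounding xa to the nearest integer; it lies in
     [-0.5, 0.5].  The adjustments below turn round-to-even at the
     halfway points into the round-half-away-from-zero that round ()
     requires (the sign is reapplied afterwards).  */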

  /* generate 0.5, 1.0 and -0.5 */
  half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
  one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
  mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
                               0, OPTAB_DIRECT);

  /* Compensate.  */
  tmp = gen_reg_rtx (mode);
  /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
  tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
                          gen_rtx_AND (mode, one, tmp)));
  xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
  /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
  tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
  emit_insn (gen_rtx_SET (VOIDmode, tmp,
                          gen_rtx_AND (mode, one, tmp)));
  xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);

  /* res = copysign (xa2, operand1) */
  ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}

/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */
void
ix86_expand_trunc (rtx operand0, rtx operand1)
{
  /* C code for SSE variant we expand below.
        double xa = fabs (x), x2;
        if (!isless (xa, TWO52))
          return x;
        x2 = (double)(long)x;
        if (HONOR_SIGNED_ZEROS (mode))
          return copysign (x2, x);
        return x2;
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, xi, TWO52, label, res, mask;

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* x = (double)(long)x */
  xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
  expand_fix (xi, res, 0);
  expand_float (res, xi, 0);
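  /* expand_fix emits a truncating (fix_trunc) conversion, i.e. the SSE
     cvtt* instructions, so the fix/float round trip above computes
     exactly trunc for values below TWO52.  */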

  if (HONOR_SIGNED_ZEROS (mode))
    ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}

/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  Sequence that works without relying on DImode truncation
   via cvttsd2siq that is only available on 64bit targets.  */
void
ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
{
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, mask, TWO52, label, one, res, smask, tmp;

  /* C code for SSE variant we expand below.
        double xa = fabs (x), x2;
        if (!isless (xa, TWO52))
          return x;
        xa2 = xa + TWO52 - TWO52;
     Compensate:
        if (xa2 > xa)
          xa2 -= 1.0;
        x2 = copysign (xa2, x);
        return x2;
   */

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &smask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* res = xa + TWO52 - TWO52; */
  tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
  tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);
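  /* res now holds xa rounded to the nearest integer.  If rounding went
     up, res exceeds xa and must be decremented below so that the
     result is floor (xa), i.e. trunc (|x|).  */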

  /* generate 1.0 */
  one = force_reg (mode, const_double_from_real_value (dconst1, mode));

  /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
  mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_AND (mode, mask, one)));
  tmp = expand_simple_binop (mode, MINUS,
                             res, mask, NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);

  /* res = copysign (res, operand1) */
  ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}

/* Expand SSE sequence for computing round from OPERAND1 storing
   into OPERAND0.  */
void
ix86_expand_round (rtx operand0, rtx operand1)
{
  /* C code for the stuff we're doing below:
        double xa = fabs (x);
        if (!isless (xa, TWO52))
          return x;
        xa = (double)(long)(xa + nextafter (0.5, 0.0));
        return copysign (xa, x);
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx res, TWO52, xa, label, xi, half, mask;
  const struct real_format *fmt;
  REAL_VALUE_TYPE pred_half, half_minus_pred_half;

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  TWO52 = ix86_gen_TWO52 (mode);
  xa = ix86_expand_sse_fabs (res, &mask);
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* load nextafter (0.5, 0.0) */
  fmt = REAL_MODE_FORMAT (mode);
  real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
  REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
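  /* pred_half = 0.5 - 2**(-p - 1) is the largest representable value
     strictly below 0.5.  Adding plain 0.5 would be wrong for inputs
     just below 0.5, where xa + 0.5 rounds up to 1.0.  */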

  /* xa = xa + 0.5 */
  half = force_reg (mode, const_double_from_real_value (pred_half, mode));
  xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);

  /* xa = (double)(int64_t)xa */
  xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
  expand_fix (xi, xa, 0);
  expand_float (xa, xi, 0);

  /* res = copysign (xa, operand1) */
  ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}

#include "gt-i386.h"